repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
seg/2016-ml-contest | geoLEARN/Feature_Engineering.py | 2 | 12059 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 16 09:15:54 2016
@author: Antoine Caté
"""
import pandas as pd
import numpy as np
###### Import packages needed for the make_vars functions
from scipy.interpolate import interp1d
import pywt
from skimage.filters.rank import entropy
from skimage.morphology import rectangle
from skimage.util import img_as_ubyte
def make_dwt_vars_cD(wells_df,logs,levels,wavelet):
wave= pywt.Wavelet(wavelet)
grouped = wells_df.groupby(['Well Name'])
new_df = pd.DataFrame()
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
cA_4, cD_4, cD_3, cD_2, cD_1 = pywt.wavedec(temp_data,wave,level=4,mode='symmetric')
dict_cD_levels = {1:cD_1, 2:cD_2, 3:cD_3, 4:cD_4}
for i in levels:
new_depth = np.linspace(min(depth),max(depth),len(dict_cD_levels[i]))
fA = interp1d(new_depth,dict_cD_levels[i],kind='nearest')
temp_df[log + '_cD_level_' + str(i)] = fA(depth)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
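# Note: pywt.wavedec with level=4 returns [cA_4, cD_4, cD_3, cD_2, cD_1]; the
# level-i detail vector has roughly len(temp_data) / 2**i coefficients, so the
# nearest-neighbour interp1d above simply stretches the selected detail levels
# back onto the original depth grid before they are stored as new columns.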
def make_dwt_vars_cA(wells_df,logs,levels,wavelet):
wave= pywt.Wavelet(wavelet)
grouped = wells_df.groupby(['Well Name'])
new_df = pd.DataFrame()
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
for i in levels:
cA_cD = pywt.wavedec(temp_data,wave,level=i,mode='symmetric')
cA = cA_cD[0]
new_depth = np.linspace(min(depth),max(depth),len(cA))
fA = interp1d(new_depth,cA,kind='nearest')
temp_df[log + '_cA_level_' + str(i)] = fA(depth)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_entropy_vars(wells_df,logs,l_foots):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
image = np.vstack((temp_data,temp_data,temp_data))
image -= np.median(image)
image /= np.max(np.abs(image))
image = img_as_ubyte(image)
for l_foot in l_foots:
footprint = rectangle(l_foot,3)
temp_df[log + '_entropy_foot' + str(l_foot)] = entropy(image,footprint)[0,:]
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_gradient_vars(wells_df,logs,dx_list):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
for dx in dx_list:
temp_df[log + 'gradient_dx' + str(dx)] = np.gradient(temp_data,dx)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_av_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
temp_df[log + '_moving_av_' + str(window) + 'ft'] = pd.rolling_mean(arg=temp_data, window=window, min_periods=1, center=True)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_std_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
temp_df[log + '_moving_std_' + str(window) + 'ft'] = pd.rolling_std(arg=temp_data, window=window, min_periods=1, center=True)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_max_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
temp_df[log + '_moving_max_' + str(window) + 'ft'] = pd.rolling_max(arg=temp_data, window=window, min_periods=1, center=True)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_min_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
temp_df[log + '_moving_min_' + str(window) + 'ft'] = pd.rolling_min(arg=temp_data, window=window, min_periods=1, center=True)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
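# Note: pd.rolling_mean / pd.rolling_std / pd.rolling_max / pd.rolling_min used
# above are the 2016-era pandas API; they were deprecated in pandas 0.18 and
# removed in 0.23. On current pandas the equivalent call is, for example:
#
#     temp_data.rolling(window=window, min_periods=1, center=True).min()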
def make_rolling_marine_ratio_vars(wells_df, windows):
grouped = wells_df.groupby(['Well Name'])
new_var = pd.DataFrame()
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
NM_M = grouped.get_group(key)['NM_M']
for window in windows:
temp_df['Depth'] = grouped.get_group(key)['Depth']
temp_df['Well Name'] = [key for _ in range(len(NM_M))]
temp_df['NM_M'] = grouped.get_group(key)['NM_M']
#We initialize a new variable
temp_df['Marine_ratio_' + str(window) + '_centered'] = pd.rolling_mean(arg=temp_df['NM_M'], window=window, min_periods=1, center=True)
new_var = new_var.append(temp_df)
new_var = new_var.sort_index()
new_var =new_var.drop(['Well Name', 'Depth','NM_M'],axis=1)
return new_var
def make_distance_to_M_up_vars(wells_df):
grouped = wells_df.groupby(['Well Name'])
new_var = pd.DataFrame()
for key in grouped.groups.keys():
NM_M = grouped.get_group(key)['NM_M'].values
#We create a temporary dataframe that we reset for every well
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
temp_df['Well Name'] = [key for _ in range(len(NM_M))]
#We initialize a new variable
dist_mar_up = np.zeros(len(NM_M))
# A variable counting the interval from the upper marine deposit
# We initialize it to -1 since we do not know what's above the first log
count = -1
for i in range(len(NM_M)):
if ((NM_M[i] == 1) & (count>-1)):
count+=0.5
dist_mar_up[i] += count
elif NM_M[i] == 2:
count=0
else:
dist_mar_up[i] = count
temp_df['dist_M_up'] = dist_mar_up
# We append each well variable to a larger dataframe
# We use a dataframe to preserve the index
new_var = new_var.append(temp_df)
new_var = new_var.sort_index()
new_var =new_var.drop(['Well Name','Depth'],axis=1)
return new_var
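# Worked example of the counter above (illustration only, assuming the contest
# convention NM_M == 1 for nonmarine and NM_M == 2 for marine samples):
#
#     NM_M      = [2, 1,   1,   2, 1]
#     dist_M_up = [0, 0.5, 1.0, 0, 0.5]
#
# The counter resets to 0 on every marine sample and grows by 0.5 per nonmarine
# sample below it; wells that start with nonmarine samples keep the sentinel
# value -1 until the first marine sample is reached.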
def make_distance_to_M_down_vars(wells_df):
grouped = wells_df.groupby(['Well Name'])
new_var = pd.DataFrame()
for key in grouped.groups.keys():
NM_M = grouped.get_group(key)['NM_M'].values
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
temp_df['Well Name'] = [key for _ in range(len(NM_M))]
dist_mar_down = np.zeros(len(NM_M))
count = -1
for i in range(len(NM_M)-1,-1,-1):
if ((NM_M[i] == 1) & (count>-1)):
count+=0.5
dist_mar_down[i] += count
elif NM_M[i] == 2:
count=0
else:
dist_mar_down[i] = count
temp_df['dist_M_down'] = dist_mar_down
new_var = new_var.append(temp_df)
new_var = new_var.sort_index()
new_var =new_var.drop(['Well Name','Depth'],axis=1)
return new_var
def make_distance_to_NM_up_vars(wells_df):
grouped = wells_df.groupby(['Well Name'])
new_var = pd.DataFrame()
for key in grouped.groups.keys():
NM_M = grouped.get_group(key)['NM_M'].values
#We create a temporary dataframe that we reset for every well
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
temp_df['Well Name'] = [key for _ in range(len(NM_M))]
#We initialize a new variable
dist_mar_up = np.zeros(len(NM_M))
# A variable counting the interval from the upper nonmarine deposit
# We initialize it to -1 since we do not know what's above the first log
count = -1
for i in range(len(NM_M)):
if ((NM_M[i] == 2) & (count>-1)):
count+=0.5
dist_mar_up[i] += count
elif NM_M[i] == 1:
count=0
else:
dist_mar_up[i] = count
temp_df['dist_NM_up'] = dist_mar_up
# We append each well variable to a larger dataframe
# We use a dataframe to preserve the index
new_var = new_var.append(temp_df)
new_var = new_var.sort_index()
new_var =new_var.drop(['Well Name','Depth'],axis=1)
return new_var
def make_distance_to_NM_down_vars(wells_df):
grouped = wells_df.groupby(['Well Name'])
new_var = pd.DataFrame()
for key in grouped.groups.keys():
NM_M = grouped.get_group(key)['NM_M'].values
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
temp_df['Well Name'] = [key for _ in range(len(NM_M))]
dist_mar_down = np.zeros(len(NM_M))
count = -1
for i in range(len(NM_M)-1,-1,-1):
if ((NM_M[i] == 2) & (count>-1)):
count+=0.5
dist_mar_down[i] += count
elif NM_M[i] == 1:
count=0
else:
dist_mar_down[i] = count
temp_df['dist_NM_down'] = dist_mar_down
new_var = new_var.append(temp_df)
new_var = new_var.sort_index()
new_var =new_var.drop(['Well Name','Depth'],axis=1)
return new_var
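# Minimal usage sketch (illustration only, not part of the original contest
# script). The file name 'training_data.csv' and the log names 'GR' and
# 'ILD_log10' are assumptions standing in for the contest data, and 'db3' is
# an arbitrary wavelet choice. Note that DataFrame.append, used throughout
# this script, was removed in pandas 2.0 (pd.concat is the replacement).
if __name__ == '__main__':
    # Well data with 'Well Name', 'Depth', 'NM_M' and the log columns used below.
    wells = pd.read_csv('training_data.csv')
    # Approximation coefficients of 1- to 3-level wavelet decompositions,
    # resampled back onto each well's depth grid.
    dwt_cA = make_dwt_vars_cA(wells, logs=['GR', 'ILD_log10'],
                              levels=[1, 2, 3], wavelet='db3')
    # First derivative of the gamma-ray log at two sampling steps.
    grad = make_gradient_vars(wells, logs=['GR'], dx_list=[2, 3])
    # Centered moving averages over two window sizes.
    moving = make_moving_av_vars(wells, logs=['GR'], windows=[5, 10])
    # Counter tracking how far below the last marine interval each sample sits.
    dist_up = make_distance_to_M_up_vars(wells)
    # Every frame keeps the original row index, so they align column-wise.
    features = pd.concat([wells, dwt_cA, grad, moving, dist_up], axis=1)
    print(features.shape)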
| apache-2.0 |
pypot/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 88 | 2828 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#entries equal to 0 denote missing edges: set them to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
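def _example_floyd_warshall_slow():
    # Illustrative sketch, not one of the original tests (the leading
    # underscore keeps pytest from collecting it). On a 3-node undirected
    # graph with weights 0-1: 1, 1-2: 2 and 0-2: 10, the brute-force
    # reference should prefer the two-hop path 0 -> 1 -> 2 of length 3
    # over the direct edge of length 10.
    toy = np.array([[0., 1., 10.],
                    [1., 0., 2.],
                    [10., 2., 0.]])
    dist = floyd_warshall_slow(toy, directed=False)
    assert dist[0, 2] == 3.0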
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
RomainBrault/scikit-learn | sklearn/ensemble/tests/test_forest.py | 9 | 43013 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.94, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
importances_parallel = est.feature_importances_
assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
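# As a quick sanity check of the helper above, entropy([0, 0, 1, 1]) gives
# p = 0.5 for each class and therefore -2 * 0.5 * log2(0.5) = 1.0 bit,
# while a pure node such as entropy([1, 1, 1]) gives 0.0.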
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Using a Python 2.x list as the sample_weight parameter used to raise
# an exception. This test makes sure such code will now run correctly.
clf = ForestClassifier()
sample_weight = [1.] * len(iris.data)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
# Test that class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='balanced', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
def test_min_impurity_split():
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_split=0.1)
est = assert_warns_message(DeprecationWarning, "min_impurity_decrease",
est.fit, X, y)
for tree in est.estimators_:
assert_equal(tree.min_impurity_split, 0.1)
def test_min_impurity_decrease():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_decrease=0.1)
est.fit(X, y)
for tree in est.estimators_:
# Simply check if the parameter is passed on correctly. Tree tests
# will suffice for the actual working of this param
assert_equal(tree.min_impurity_decrease, 0.1)
| bsd-3-clause |
gfyoung/pandas | pandas/tests/io/test_parquet.py | 1 | 36752 | """ test parquet compat """
import datetime
from distutils.version import LooseVersion
from io import BytesIO
import os
import pathlib
from warnings import catch_warnings
import numpy as np
import pytest
from pandas.compat import PY38, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
pytestmark = pytest.mark.filterwarnings(
"ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
# setup engines & skips
@pytest.fixture(
params=[
pytest.param(
"fastparquet",
marks=pytest.mark.skipif(
not _HAVE_FASTPARQUET, reason="fastparquet is not installed"
),
),
pytest.param(
"pyarrow",
marks=pytest.mark.skipif(
not _HAVE_PYARROW, reason="pyarrow is not installed"
),
),
]
)
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return "pyarrow"
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
return "fastparquet"
@pytest.fixture
def df_compat():
return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
}
)
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{
"string": list("abc"),
"string_with_nan": ["a", np.nan, "c"],
"string_with_none": ["a", None, "c"],
"bytes": [b"foo", b"bar", b"baz"],
"unicode": ["foo", "bar", "baz"],
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_nan": [2.0, np.nan, 3.0],
"bool": [True, False, True],
"datetime": pd.date_range("20130101", periods=3),
"datetime_with_nat": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
}
)
@pytest.fixture(
params=[
datetime.datetime.now(datetime.timezone.utc),
datetime.datetime.now(datetime.timezone.min),
datetime.datetime.now(datetime.timezone.max),
datetime.datetime.strptime("2019-01-04T16:41:24+0200", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24+0215", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24-0200", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24-0215", "%Y-%m-%dT%H:%M:%S%z"),
]
)
def timezone_aware_date_list(request):
return request.param
def check_round_trip(
df,
engine=None,
path=None,
write_kwargs=None,
read_kwargs=None,
expected=None,
check_names=True,
check_like=False,
check_dtype=True,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
check_names: bool, optional
Whether column / index names are compared (forwarded to tm.assert_frame_equal)
check_like: bool, optional
If True, ignore the order of index & columns.
check_dtype: bool, optional
Whether dtypes are compared (forwarded to tm.assert_frame_equal)
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {"compression": None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs["engine"] = engine
read_kwargs["engine"] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(
expected,
actual,
check_names=check_names,
check_like=check_like,
check_dtype=check_dtype,
)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
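# A minimal direct-usage sketch of the helper above (assuming pyarrow is
# installed); the tests below exercise it through fixtures instead:
#
#     check_round_trip(pd.DataFrame({"A": [1, 2, 3]}), engine="pyarrow")
#
# With path=None this writes the frame to a temporary parquet file and reads
# it back twice, asserting the reloaded frame equals the original each time.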
def test_invalid_engine(df_compat):
msg = "engine must be one of 'pyarrow', 'fastparquet'"
with pytest.raises(ValueError, match=msg):
check_round_trip(df_compat, "foo", "bar")
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context("io.parquet.engine", "pyarrow"):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context("io.parquet.engine", "fastparquet"):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context("io.parquet.engine", "auto"):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "pyarrow"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "fastparquet"):
assert isinstance(get_engine("auto"), FastParquetImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "auto"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_get_engine_auto_error_message():
# Expect different error messages from get_engine(engine="auto")
# if engines aren't installed vs. are installed but bad version
from pandas.compat._optional import VERSIONS
# Do we have engines installed, but a bad version of them?
pa_min_ver = VERSIONS.get("pyarrow")
fp_min_ver = VERSIONS.get("fastparquet")
have_pa_bad_version = (
False
if not _HAVE_PYARROW
else LooseVersion(pyarrow.__version__) < LooseVersion(pa_min_ver)
)
have_fp_bad_version = (
False
if not _HAVE_FASTPARQUET
else LooseVersion(fastparquet.__version__) < LooseVersion(fp_min_ver)
)
# Do we have usable engines installed?
have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version
have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version
if not have_usable_pa and not have_usable_fp:
# No usable engines found.
if have_pa_bad_version:
match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
if have_fp_bad_version:
match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
def test_cross_engine_fp_pa(request, df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
if (
LooseVersion(pyarrow.__version__) < "0.15"
and LooseVersion(pyarrow.__version__) >= "0.13"
):
request.node.add_marker(
pytest.mark.xfail(
"Reading fastparquet with pyarrow in 0.14 fails: "
"https://issues.apache.org/jira/browse/ARROW-6492"
)
)
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
def check_error_on_write(self, df, engine, exc, err_msg):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc, match=err_msg):
to_parquet(df, path, engine, compression=None)
def check_external_error_on_write(self, df, engine, exc):
# check that an external library is raising the exception on writing
with tm.ensure_clean() as path:
with tm.external_error_raised(exc):
to_parquet(df, path, engine, compression=None)
@tm.network
def test_parquet_read_from_url(self, df_compat, engine):
if engine != "auto":
pytest.importorskip(engine)
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/"
"master/pandas/tests/io/data/parquet/simple.parquet"
)
df = pd.read_parquet(url)
tm.assert_frame_equal(df, df_compat)
class TestBasic(Base):
def test_error(self, engine):
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
msg = "to_parquet only supports IO with DataFrames"
self.check_error_on_write(obj, engine, ValueError, msg)
def test_columns_dtypes(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# unicode
df.columns = ["foo", "bar"]
check_round_trip(df, engine)
def test_columns_dtypes_invalid(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
msg = "parquet must have string column names"
# numeric
df.columns = [0, 1]
self.check_error_on_write(df, engine, ValueError, msg)
# bytes
df.columns = [b"foo", b"bar"]
self.check_error_on_write(df, engine, ValueError, msg)
# python object
df.columns = [
datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1),
]
self.check_error_on_write(df, engine, ValueError, msg)
@pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
def test_compression(self, engine, compression):
if compression == "snappy":
pytest.importorskip("snappy")
elif compression == "brotli":
pytest.importorskip("brotli")
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine, write_kwargs={"compression": compression})
def test_read_columns(self, engine):
# GH18154
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
expected = pd.DataFrame({"string": list("abc")})
check_round_trip(
df, engine, expected=expected, read_kwargs={"columns": ["string"]}
)
def test_write_index(self, engine):
check_names = engine != "fastparquet"
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine)
indexes = [
[2, 3, 4],
pd.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
]
# non-default index
for index in indexes:
df.index = index
if isinstance(index, pd.DatetimeIndex):
                df.index = df.index._with_freq(None)  # freq doesn't round-trip
check_round_trip(df, engine, check_names=check_names)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = "foo"
check_round_trip(df, engine)
def test_write_multiindex(self, pa):
# Not supported in fastparquet as of 0.1.3 or older pyarrow version
engine = pa
df = pd.DataFrame({"A": [1, 2, 3]})
index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df.index = index
check_round_trip(df, engine)
def test_multiindex_with_columns(self, pa):
engine = pa
dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS")
df = pd.DataFrame(np.random.randn(2 * len(dates), 3), columns=list("ABC"))
index1 = pd.MultiIndex.from_product(
[["Level1", "Level2"], dates], names=["level", "date"]
)
index2 = index1.copy(names=None)
for index in [index1, index2]:
df.index = index
check_round_trip(df, engine)
check_round_trip(
df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
)
def test_write_ignoring_index(self, engine):
# ENH 20768
# Ensure index=False omits the index from the written Parquet file.
df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]})
write_kwargs = {"compression": None, "index": False}
# Because we're dropping the index, we expect the loaded dataframe to
# have the default integer index.
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore custom index
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"]
)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore multi-indexes as well.
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = pd.DataFrame(
{"one": list(range(8)), "two": [-i for i in range(8)]}, index=arrays
)
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
def test_write_column_multiindex(self, engine):
# Not able to write column multi-indexes with non-string column names.
mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
msg = (
r"\s*parquet must have string column names for all values in\s*"
"each level of the MultiIndex"
)
self.check_error_on_write(df, engine, ValueError, msg)
def test_write_column_multiindex_nonstring(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Not able to write column multi-indexes with non-string column names
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
[1, 2, 1, 2, 1, 2, 1, 2],
]
df = pd.DataFrame(np.random.randn(8, 8), columns=arrays)
df.columns.names = ["Level1", "Level2"]
msg = (
r"\s*parquet must have string column names for all values in\s*"
"each level of the MultiIndex"
)
self.check_error_on_write(df, engine, ValueError, msg)
def test_write_column_multiindex_string(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Write column multi-indexes with string column names
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = pd.DataFrame(np.random.randn(8, 8), columns=arrays)
df.columns.names = ["ColLevel1", "ColLevel2"]
check_round_trip(df, engine)
def test_write_column_index_string(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Write column indexes with string column names
arrays = ["bar", "baz", "foo", "qux"]
df = pd.DataFrame(np.random.randn(8, 4), columns=arrays)
df.columns.name = "StringCol"
check_round_trip(df, engine)
def test_write_column_index_nonstring(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Write column indexes with string column names
arrays = [1, 2, 3, 4]
df = pd.DataFrame(np.random.randn(8, 4), columns=arrays)
df.columns.name = "NonStringCol"
msg = r"parquet must have string column names"
self.check_error_on_write(df, engine, ValueError, msg)
class TestParquetPyArrow(Base):
def test_basic(self, pa, df_full):
df = df_full
# additional supported types for pyarrow
dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
        dti = dti._with_freq(None)  # freq doesn't round-trip
df["datetime_tz"] = dti
df["bool_with_none"] = [True, None, True]
check_round_trip(df, pa)
def test_basic_subset_columns(self, pa, df_full):
# GH18628
df = df_full
# additional supported types for pyarrow
df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
check_round_trip(
df,
pa,
expected=df[["string", "int"]],
read_kwargs={"columns": ["string", "int"]},
)
def test_to_bytes_without_path_or_buf_provided(self, pa, df_full):
# GH 37105
buf_bytes = df_full.to_parquet(engine=pa)
assert isinstance(buf_bytes, bytes)
buf_stream = BytesIO(buf_bytes)
res = pd.read_parquet(buf_stream)
tm.assert_frame_equal(df_full, res)
def test_duplicate_columns(self, pa):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_error_on_write(df, pa, ValueError, "Duplicate column names found")
def test_unsupported(self, pa):
if LooseVersion(pyarrow.__version__) < LooseVersion("0.15.1.dev"):
# period - will be supported using an extension type with pyarrow 1.0
df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_external_error_on_write(df, pa, pyarrow.ArrowException)
# timedelta
df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
self.check_external_error_on_write(df, pa, NotImplementedError)
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_external_error_on_write(df, pa, pyarrow.ArrowException)
def test_categorical(self, pa):
# supported in >= 0.7.0
df = pd.DataFrame()
df["a"] = pd.Categorical(list("abcdef"))
# test for null, out-of-order values, and unobserved category
df["b"] = pd.Categorical(
["bar", "foo", "foo", "bar", None, "bar"],
dtype=pd.CategoricalDtype(["foo", "bar", "baz"]),
)
# test for ordered flag
df["c"] = pd.Categorical(
["a", "b", "c", "a", "c", "b"], categories=["b", "c", "d"], ordered=True
)
if LooseVersion(pyarrow.__version__) >= LooseVersion("0.15.0"):
check_round_trip(df, pa)
else:
# de-serialized as object for pyarrow < 0.15
expected = df.astype(object)
check_round_trip(df, pa, expected=expected)
@pytest.mark.xfail(
is_platform_windows() and PY38,
reason="localhost connection rejected",
strict=False,
)
def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so):
s3fs = pytest.importorskip("s3fs")
if LooseVersion(pyarrow.__version__) <= LooseVersion("0.17.0"):
pytest.skip()
s3 = s3fs.S3FileSystem(**s3so)
kw = {"filesystem": s3}
check_round_trip(
df_compat,
pa,
path="pandas-test/pyarrow.parquet",
read_kwargs=kw,
write_kwargs=kw,
)
def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so):
if LooseVersion(pyarrow.__version__) <= LooseVersion("0.17.0"):
pytest.skip()
# GH #19134
s3so = {"storage_options": s3so}
check_round_trip(
df_compat,
pa,
path="s3://pandas-test/pyarrow.parquet",
read_kwargs=s3so,
write_kwargs=s3so,
)
@td.skip_if_no("s3fs") # also requires flask
@pytest.mark.parametrize(
"partition_col",
[
["A"],
[],
],
)
def test_s3_roundtrip_for_dir(
self, df_compat, s3_resource, pa, partition_col, s3so
):
# GH #26388
expected_df = df_compat.copy()
# GH #35791
# read_table uses the new Arrow Datasets API since pyarrow 1.0.0
# Previous behaviour was pyarrow partitioned columns become 'category' dtypes
# These are added to back of dataframe on read. In new API category dtype is
# only used if partition field is string, but this changed again to use
# category dtype for all types (not only strings) in pyarrow 2.0.0
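        # In short, the expectation encoded below: with pyarrow in [1.0.0, 2.0.0)
        # the partitioned column reads back with an integer dtype (int32), while
        # older or newer pyarrow versions read it back as 'category'.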
pa10 = (LooseVersion(pyarrow.__version__) >= LooseVersion("1.0.0")) and (
LooseVersion(pyarrow.__version__) < LooseVersion("2.0.0")
)
if partition_col:
if pa10:
partition_col_type = "int32"
else:
partition_col_type = "category"
expected_df[partition_col] = expected_df[partition_col].astype(
partition_col_type
)
check_round_trip(
df_compat,
pa,
expected=expected_df,
path="s3://pandas-test/parquet_dir",
read_kwargs={"storage_options": s3so},
write_kwargs={
"partition_cols": partition_col,
"compression": None,
"storage_options": s3so,
},
check_like=True,
repeat=1,
)
@td.skip_if_no("pyarrow")
def test_read_file_like_obj_support(self, df_compat):
buffer = BytesIO()
df_compat.to_parquet(buffer)
df_from_buf = pd.read_parquet(buffer)
tm.assert_frame_equal(df_compat, df_from_buf)
@td.skip_if_no("pyarrow")
def test_expand_user(self, df_compat, monkeypatch):
monkeypatch.setenv("HOME", "TestingUser")
monkeypatch.setenv("USERPROFILE", "TestingUser")
with pytest.raises(OSError, match=r".*TestingUser.*"):
pd.read_parquet("~/file.parquet")
with pytest.raises(OSError, match=r".*TestingUser.*"):
df_compat.to_parquet("~/file.parquet")
def test_partition_cols_supported(self, pa, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, partition_cols=partition_cols, compression=None)
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
assert len(dataset.partitions.partition_names) == 2
assert dataset.partitions.partition_names == set(partition_cols)
assert read_parquet(path).shape == df.shape
def test_partition_cols_string(self, pa, df_full):
# GH #27117
partition_cols = "bool"
partition_cols_list = [partition_cols]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, partition_cols=partition_cols, compression=None)
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
assert len(dataset.partitions.partition_names) == 1
assert dataset.partitions.partition_names == set(partition_cols_list)
assert read_parquet(path).shape == df.shape
@pytest.mark.parametrize("path_type", [str, pathlib.Path])
def test_partition_cols_pathlib(self, pa, df_compat, path_type):
# GH 35902
partition_cols = "B"
partition_cols_list = [partition_cols]
df = df_compat
with tm.ensure_clean_dir() as path_str:
path = path_type(path_str)
df.to_parquet(path, partition_cols=partition_cols_list)
assert read_parquet(path).shape == df.shape
def test_empty_dataframe(self, pa):
# GH #27339
df = pd.DataFrame()
check_round_trip(df, pa)
def test_write_with_schema(self, pa):
import pyarrow
df = pd.DataFrame({"x": [0, 1]})
schema = pyarrow.schema([pyarrow.field("x", type=pyarrow.bool_())])
out_df = df.astype(bool)
check_round_trip(df, pa, write_kwargs={"schema": schema}, expected=out_df)
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_additional_extension_arrays(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype="Int64"),
"b": pd.Series([1, 2, 3], dtype="UInt32"),
"c": pd.Series(["a", None, "c"], dtype="string"),
}
)
if LooseVersion(pyarrow.__version__) >= LooseVersion("0.16.0"):
expected = df
else:
# de-serialized as plain int / object
expected = df.assign(
a=df.a.astype("int64"), b=df.b.astype("int64"), c=df.c.astype("object")
)
check_round_trip(df, pa, expected=expected)
df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")})
if LooseVersion(pyarrow.__version__) >= LooseVersion("0.16.0"):
expected = df
else:
# if missing values in integer, currently de-serialized as float
expected = df.assign(a=df.a.astype("float64"))
check_round_trip(df, pa, expected=expected)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_additional_extension_types(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol + by defining a custom ExtensionType
df = pd.DataFrame(
{
# Arrow does not yet support struct in writing to Parquet (ARROW-1644)
# "c": pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2), (3, 4)]),
"d": pd.period_range("2012-01-01", periods=3, freq="D"),
}
)
check_round_trip(df, pa)
@td.skip_if_no("pyarrow", min_version="0.16")
def test_use_nullable_dtypes(self, pa):
import pyarrow.parquet as pq
table = pyarrow.table(
{
"a": pyarrow.array([1, 2, 3, None], "int64"),
"b": pyarrow.array([1, 2, 3, None], "uint8"),
"c": pyarrow.array(["a", "b", "c", None]),
"d": pyarrow.array([True, False, True, None]),
}
)
with tm.ensure_clean() as path:
# write manually with pyarrow to write integers
pq.write_table(table, path)
result1 = read_parquet(path)
result2 = read_parquet(path, use_nullable_dtypes=True)
assert result1["a"].dtype == np.dtype("float64")
expected = pd.DataFrame(
{
"a": pd.array([1, 2, 3, None], dtype="Int64"),
"b": pd.array([1, 2, 3, None], dtype="UInt8"),
"c": pd.array(["a", "b", "c", None], dtype="string"),
"d": pd.array([True, False, True, None], dtype="boolean"),
}
)
tm.assert_frame_equal(result2, expected)
@td.skip_if_no("pyarrow", min_version="0.14")
def test_timestamp_nanoseconds(self, pa):
# with version 2.0, pyarrow defaults to writing the nanoseconds, so
# this should work without error
df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1n", periods=10)})
check_round_trip(df, pa, write_kwargs={"version": "2.0"})
def test_timezone_aware_index(self, pa, timezone_aware_date_list):
if LooseVersion(pyarrow.__version__) >= LooseVersion("2.0.0"):
# temporary skip this test until it is properly resolved
# https://github.com/pandas-dev/pandas/issues/37286
pytest.skip()
idx = 5 * [timezone_aware_date_list]
df = pd.DataFrame(index=idx, data={"index_as_col": idx})
# see gh-36004
# compare time(zone) values only, skip their class:
# pyarrow always creates fixed offset timezones using pytz.FixedOffset()
# even if it was datetime.timezone() originally
#
# technically they are the same:
# they both implement datetime.tzinfo
# they both wrap datetime.timedelta()
# this use-case sets the resolution to 1 minute
check_round_trip(df, pa, check_dtype=False)
@td.skip_if_no("pyarrow", min_version="1.0.0")
def test_filter_row_groups(self, pa):
# https://github.com/pandas-dev/pandas/issues/26551
df = pd.DataFrame({"a": list(range(0, 3))})
with tm.ensure_clean() as path:
df.to_parquet(path, pa)
result = read_parquet(
path, pa, filters=[("a", "==", 0)], use_legacy_dataset=False
)
assert len(result) == 1
class TestParquetFastParquet(Base):
@td.skip_if_no("fastparquet", min_version="0.3.2")
def test_basic(self, fp, df_full):
df = df_full
dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
        dti = dti._with_freq(None)  # freq doesn't round-trip
df["datetime_tz"] = dti
df["timedelta"] = pd.timedelta_range("1 day", periods=3)
check_round_trip(df, fp)
@pytest.mark.skip(reason="not supported")
def test_duplicate_columns(self, fp):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
msg = "Cannot create parquet dataset with duplicate column names"
self.check_error_on_write(df, fp, ValueError, msg)
def test_bool_with_none(self, fp):
df = pd.DataFrame({"a": [True, None, False]})
expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16")
check_round_trip(df, fp, expected=expected)
def test_unsupported(self, fp):
# period
df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
self.check_error_on_write(df, fp, ValueError, "cannot infer type for")
# mixed
df = pd.DataFrame({"a": ["a", 1, 2.0]})
msg = "Can't infer object conversion type"
self.check_error_on_write(df, fp, ValueError, msg)
def test_categorical(self, fp):
df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
check_round_trip(df, fp)
def test_filter_row_groups(self, fp):
d = {"a": list(range(0, 3))}
df = pd.DataFrame(d)
with tm.ensure_clean() as path:
df.to_parquet(path, fp, compression=None, row_group_offsets=1)
result = read_parquet(path, fp, filters=[("a", "==", 0)])
assert len(result) == 1
def test_s3_roundtrip(self, df_compat, s3_resource, fp, s3so):
# GH #19134
check_round_trip(
df_compat,
fp,
path="s3://pandas-test/fastparquet.parquet",
read_kwargs={"storage_options": s3so},
write_kwargs={"compression": None, "storage_options": s3so},
)
def test_partition_cols_supported(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
partition_cols=partition_cols,
compression=None,
)
assert os.path.exists(path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_partition_cols_string(self, fp, df_full):
# GH #27117
partition_cols = "bool"
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
partition_cols=partition_cols,
compression=None,
)
assert os.path.exists(path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 1
def test_partition_on_supported(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
compression=None,
partition_on=partition_cols,
)
assert os.path.exists(path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
msg = (
"Cannot use both partition_on and partition_cols. Use partition_cols for "
"partitioning data"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
compression=None,
partition_on=partition_cols,
partition_cols=partition_cols,
)
def test_empty_dataframe(self, fp):
# GH #27339
df = pd.DataFrame()
expected = df.copy()
expected.index.name = "index"
check_round_trip(df, fp, expected=expected)
def test_timezone_aware_index(self, fp, timezone_aware_date_list):
idx = 5 * [timezone_aware_date_list]
df = pd.DataFrame(index=idx, data={"index_as_col": idx})
expected = df.copy()
expected.index.name = "index"
check_round_trip(df, fp, expected=expected)
def test_use_nullable_dtypes_not_supported(self, fp):
df = pd.DataFrame({"a": [1, 2]})
with tm.ensure_clean() as path:
df.to_parquet(path)
with pytest.raises(ValueError, match="not supported for the fastparquet"):
read_parquet(path, engine="fastparquet", use_nullable_dtypes=True)
| bsd-3-clause |
arahuja/scikit-learn | sklearn/manifold/isomap.py | 36 | 7119 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
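    Examples
    --------
    A minimal illustrative sketch (synthetic data chosen only for brevity,
    not taken from the original documentation):
    >>> import numpy as np
    >>> from sklearn.manifold import Isomap
    >>> X = np.c_[np.linspace(0, 1, 30), np.linspace(0, 2, 30)]
    >>> iso = Isomap(n_neighbors=4, n_components=1)
    >>> iso.fit_transform(X).shape
    (30, 1)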
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
dshen1/trading-with-python | spreadApp/makeDist.py | 77 | 1720 | from distutils.core import setup
import py2exe
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
import matplotlib
opts = {
'py2exe': {
"compressed": 1,
"bundle_files" : 3,
"includes" : ["sip",
"matplotlib.backends",
"matplotlib.backends.backend_qt4agg",
"pylab", "numpy",
"matplotlib.backends.backend_tkagg"],
'excludes': ['_gtkagg', '_tkagg', '_agg2',
'_cairo', '_cocoaagg',
'_fltkagg', '_gtk', '_gtkcairo', ],
'dll_excludes': ['libgdk-win32-2.0-0.dll',
'libgobject-2.0-0.dll']
}
}
setup(name="triton",
version = "0.1",
scripts=["spreadScanner.pyw"],
windows=[{"script": "spreadScanner.pyw"}],
options=opts,
data_files=matplotlib.get_py2exe_datafiles(),
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="spreadDetective"))],
      zipfile = None)
| bsd-3-clause |
rahuldhote/scikit-learn | examples/classification/plot_lda_qda.py | 164 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
joshbohde/scikit-learn | examples/cluster/plot_mean_shift.py | 7 | 1795 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print __doc__
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print "number of estimated clusters : %d" % n_clusters_
###############################################################################
# Plot result
import pylab as pl
from itertools import cycle
pl.figure(1)
pl.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
pl.plot(X[my_members, 0], X[my_members, 1], col + '.')
pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
| bsd-3-clause |
jms-dipadua/financial-forecasting | file_mergers.py | 1 | 7182 | import numpy as np
import pandas as pd
from glob import glob # TODO: add in directory of files to loop through...
#import sys
from collections import defaultdict
import csv
import math
class Calendar:
def __init__(self):
self.month_max_days = {
1: 31,
2: 28,
3: 31,
4: 30,
5: 31,
6: 30,
7: 31,
8: 31,
9: 30,
10: 31,
11: 30,
12: 31
}
def max_days_in_month(self, month_num):
return self.month_max_days[month_num]
def is_leap_yr(self, year_num):
        # simple rule used throughout this script: every 4th year is a leap year
        if year_num % 4 > 0:
            return False
        return True
    def max_days_yr(self, year_num):
        if self.is_leap_yr(year_num):
            return 366
        else:
            return 365
def split_dates(self, date):
date_els = date.split('/')
month = int(date_els[0])
day = int(date_els[1])
year = int(date_els[2])
return day, month, year
def create_dates(self, day, month, year, days_in_month, periodicity):
date_array = []
day = int(day)
month = int(month)
year = int(year)
# put the first date into the array
date = str(month) +'/'+ str(day) + '/' + str(year)
date_array.append(date)
# now make the remainder of the days
for i in range(1, periodicity):
day += 1
if day > days_in_month:
day = 1
month += 1
if month > 12:
month = 1
year += 1
date = str(month) +'/'+ str(day) + '/' + str(year)
#date = date.strip() # was just to make sure. probably don't need.
date_array.append(date)
return date_array
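# Illustrative sketch of the Calendar helpers (comments only, added for
# clarity; the values shown are worked examples, not captured output):
#   cal = Calendar()
#   cal.split_dates('3/31/2015')           # -> (31, 3, 2015)
#   cal.create_dates(31, 3, 2015, 31, 3)   # -> ['3/31/2015', '4/1/2015', '4/2/2015']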
class Source:
def __init__(self):
self.calendar = Calendar() # we'll use this generic calendar as a look-up helper class
self.get_params()
self.read_file(self.base_file, 1) # base file
self.initial_data_drop() # i.e. drop IDs from df
""" BULK OF WORK HERE
# LOOP THROUGH EACH MERGE file
# READ IT, MATCH IT (AND CREATE ALT DATES AS NEEDED)
"""
for merge_f in self.merge_files:
self.read_file(merge_f, 2)
merge_periodicity = self.merge_f_dict[merge_f]
if merge_periodicity > 1:
# create all the alternate dates
merge_dataframe = self.expand_raw_merge_data(merge_periodicity)
else:
merge_dataframe = self.raw_data_merge
# with a "cleanish" dataframe in hand, merge the data
self.merge_data(merge_dataframe)
print self.dataframe.shape
self.calc_PE()
self.write_finished_file()
def get_params(self):
        # self.base_files = [] TO DO... (faster / less error-prone than manually one-at-a-time)
self.base_file = raw_input("BASE file: ") # base file
#self.base_file_period = int(raw_input("period of original file: 1 == daily, 2 == weekly, 3 == monthly, 4 == quarterly")) # base file
#financials_file = raw_input("MERGE file name, NO directory: ") # financials file (earnings, etc)
company_ticker = raw_input("Company Ticker: ")
self.financials_file = "financials/financials-"+company_ticker+".csv"
self.merge_files = [
'10-yr-tres.csv',
'brent.csv',
'civil-labor-part-rate.csv',
'civil-unemployment-rate.csv',
'cpi.csv',
'fed-debt-to-GDP.csv',
'housing-start.csv',
'initial-claims.csv',
'libor.csv',
'personal-consumption-expend.csv',
'personal-savings-rate.csv',
'real-gdp.csv',
#'real-median-hh-income.csv', # this is pending 2015 release!!
'sp500.csv',
'usd-euro.csv'
]
self.merge_files.append(self.financials_file)
self.merge_f_dict = {
            # approximate days per period: 1 = daily, 7 = weekly, 32 = monthly, 93 = quarterly, 365 = annual
            # (the monthly/quarterly counts run a day or two over a calendar month/quarter);
            # we assign day counts directly because they are used later as-is...saves a conversion step
'10-yr-tres.csv': 1,
'brent.csv': 1,
'civil-labor-part-rate.csv': 32,
'civil-unemployment-rate.csv': 32,
'cpi.csv':32,
'fed-debt-to-GDP.csv':93,
'housing-start.csv': 32,
'initial-claims.csv': 7,
'libor.csv':1,
'personal-consumption-expend.csv': 32,
'personal-savings-rate.csv': 32,
'real-gdp.csv': 93,
'real-median-hh-income.csv': 365,
'sp500.csv': 1,
'usd-euro.csv': 1,
self.financials_file: 93
}
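        # expand_raw_merge_data() passes these day counts to Calendar.create_dates()
        # as `periodicity`, so e.g. one quarterly value is copied onto 93 consecutive
        # daily rows before the inner merge on 'Date'.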
self.root_base = 'data/transformed/v5/' # this is where we'll version things
self.root_merge = 'data/fundamentals/v5/'
self.fin_file_name = raw_input("Name for Final File: ")
self.fin_file_name = 'data/working/v5/' + self.fin_file_name
def read_file(self, file_name, f_type):
# TYPE: base (1) vs merge (2)
# Read data
print "reading file data for % r" % file_name
if f_type == 1:
self.raw_data_base = pd.read_csv(self.root_base+file_name, dtype = str)
elif f_type == 2:
self.raw_data_merge = pd.read_csv(self.root_merge+file_name, dtype = str)
def initial_data_drop(self):
columns = list(self.raw_data_base.columns.values)
if 'id' in columns:
self.dataframe = self.raw_data_base.drop(['id'], axis=1)
else:
self.dataframe = self.raw_data_base
def merge_data(self, merge_dataframe):
self.dataframe = self.dataframe.merge(merge_dataframe, how='inner', on='Date')
self.dataframe.drop_duplicates(subset='Date', keep='last', inplace=True)
def expand_raw_merge_data(self, periodicity):
dates = self.raw_data_merge['Date']
# two empty arrays that we'll append values to
# then, later, make into np.arrays and then create a dataframe with
all_dates = []
all_date_vals = []
for date in dates:
#print date
day, month, year = self.calendar.split_dates(date) # <-- need a way to get the "previous day" so that you can fix the information leak
# get the original date from the merge data frame (to get its "cell" value)
tmp_df = self.raw_data_merge.loc[self.raw_data_merge['Date'] == date] # dataframe row
tmp_df_cols = list(tmp_df.columns.values) # column headers, also used below
#print tmp_df_cols
date_value = tmp_df.iloc[0][tmp_df_cols[1:]] # the value of that specific date # <- THIS IS WHERE YOU CAN NAB THE FULL DATA SET...RATHER THAN JUST ONE COL
#print "%r %r %r" % (day, month, year)
days_in_month = self.calendar.max_days_in_month(month)
if (month == 2 and self.calendar.is_leap_yr(year) == True): # annoying control for leap years
days_in_month += 1
new_dates = self.calendar.create_dates(day, month, year, days_in_month, periodicity)
# new_dates is an array
# we want to get single value entries so we have to loop through them (unfortunately)
for nd in new_dates:
all_date_vals.append(date_value)
all_dates.append(nd)
# then make a dataframe with all this stuff
all_dates = np.array([all_dates]) # this will make a row vector
# we transpose to get the column vector
all_dates = np.transpose(all_dates)
all_date_vals = np.array(all_date_vals)
#print all_dates.shape
#print all_date_vals.shape
merged_dates_date_vals = np.hstack((all_dates, all_date_vals))
new_merge_df = pd.DataFrame(merged_dates_date_vals, columns = tmp_df_cols)
#print new_merge_df
# return it to the calling function
return new_merge_df
def calc_PE(self):
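        # price-to-earnings ratio: closing price divided by earnings per share;
        # rows where EPS is zero or missing come out as inf/NaN.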
self.dataframe['PE'] = pd.to_numeric(self.dataframe['Close']) / pd.to_numeric(self.dataframe['EPS'])
def write_finished_file(self):
print self.dataframe.shape
final_file = self.dataframe.to_csv(self.fin_file_name,index_label='id')
if __name__ == "__main__":
transformation = Source()
| gpl-3.0 |
jseabold/statsmodels | statsmodels/stats/tests/test_sandwich.py | 5 | 3592 | # -*- coding: utf-8 -*-
"""Tests for sandwich robust covariance estimation
see also in regression for cov_hac compared to Gretl and
sandbox.panel test_random_panel for comparing cov_cluster, cov_hac_panel and
cov_white
Created on Sat Dec 17 08:39:16 2011
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
import statsmodels.stats.sandwich_covariance as sw
def test_cov_cluster_2groups():
#comparing cluster robust standard errors to Peterson
#requires Petersen's test_data
#http://www.kellogg.northwestern.edu/faculty/petersen/htm/papers/se/test_data.txt
import os
cur_dir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(cur_dir,"test_data.txt")
pet = np.genfromtxt(fpath)
endog = pet[:,-1]
group = pet[:,0].astype(int)
time = pet[:,1].astype(int)
exog = add_constant(pet[:,2])
res = OLS(endog, exog).fit()
cov01, covg, covt = sw.cov_cluster_2groups(res, group, group2=time)
#Reference number from Petersen
#http://www.kellogg.northwestern.edu/faculty/petersen/htm/papers/se/test_data.htm
bse_petw = [0.0284, 0.0284]
bse_pet0 = [0.0670, 0.0506]
bse_pet1 = [0.0234, 0.0334] #year
bse_pet01 = [0.0651, 0.0536] #firm and year
bse_0 = sw.se_cov(covg)
bse_1 = sw.se_cov(covt)
bse_01 = sw.se_cov(cov01)
#print res.HC0_se, bse_petw - res.HC0_se
#print bse_0, bse_0 - bse_pet0
#print bse_1, bse_1 - bse_pet1
#print bse_01, bse_01 - bse_pet01
assert_almost_equal(bse_petw, res.HC0_se, decimal=4)
assert_almost_equal(bse_0, bse_pet0, decimal=4)
assert_almost_equal(bse_1, bse_pet1, decimal=4)
assert_almost_equal(bse_01, bse_pet01, decimal=4)
def test_hac_simple():
from statsmodels.datasets import macrodata
d2 = macrodata.load_pandas().data
g_gdp = 400*np.diff(np.log(d2['realgdp'].values))
g_inv = 400*np.diff(np.log(d2['realinv'].values))
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1].values])
res_olsg = OLS(g_inv, exogg).fit()
#> NeweyWest(fm, lag = 4, prewhite = FALSE, sandwich = TRUE, verbose=TRUE, adjust=TRUE)
#Lag truncation parameter chosen: 4
# (Intercept) ggdp lint
cov1_r = [[ 1.40643899878678802, -0.3180328707083329709, -0.060621111216488610],
[ -0.31803287070833292, 0.1097308348999818661, 0.000395311760301478],
[ -0.06062111121648865, 0.0003953117603014895, 0.087511528912470993]]
#> NeweyWest(fm, lag = 4, prewhite = FALSE, sandwich = TRUE, verbose=TRUE, adjust=FALSE)
#Lag truncation parameter chosen: 4
# (Intercept) ggdp lint
cov2_r = [[ 1.3855512908840137, -0.313309610252268500, -0.059720797683570477],
[ -0.3133096102522685, 0.108101169035130618, 0.000389440793564339],
[ -0.0597207976835705, 0.000389440793564336, 0.086211852740503622]]
cov1 = sw.cov_hac_simple(res_olsg, nlags=4, use_correction=True)
se1 = sw.se_cov(cov1)
cov2 = sw.cov_hac_simple(res_olsg, nlags=4, use_correction=False)
se2 = sw.se_cov(cov2)
assert_almost_equal(cov1, cov1_r, decimal=14)
assert_almost_equal(cov2, cov2_r, decimal=14)
# compare default for nlags
cov3 = sw.cov_hac_simple(res_olsg, use_correction=False)
cov4 = sw.cov_hac_simple(res_olsg, nlags=4, use_correction=False)
assert_almost_equal(cov3, cov4, decimal=14)
| bsd-3-clause |
sargas/scipy | scipy/signal/waveforms.py | 1 | 14473 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
        ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
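    A triangle wave can be obtained by setting ``width=0.5`` (continuing the
    example above):
    >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t, width=0.5))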
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
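    The special value ``t='cutoff'`` returns the cutoff time instead of a
    waveform (parameter values here are purely illustrative):
    >>> t_cut = signal.gausspulse('cutoff', fc=5, bw=0.5, tpr=-40)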
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f1 must be positive, and f0 must be greater than f1.
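    Examples
    --------
    An illustrative sketch (parameter values chosen only for demonstration):
    a linear sweep from 6 Hz down to 1 Hz over 10 seconds, sampled at 100 Hz.
    >>> import numpy as np
    >>> from scipy.signal import chirp
    >>> t = np.linspace(0, 10, 1001)
    >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')
    >>> w.shape
    (1001,)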
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a geometric chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f1 <= 0.0 or f0 <= f1:
raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.")
c = f1 * t1
df = f0 - f1
phase = 2 * pi * (f0 * c / df) * log((df * t + c) / c)
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given." % method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array-like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
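    Examples
    --------
    A short illustrative sketch (coefficients chosen only for demonstration):
    the instantaneous frequency follows ``f(t) = 0.05*t**2 - 0.75*t + 3``.
    >>> import numpy as np
    >>> from scipy.signal import sweep_poly
    >>> p = np.poly1d([0.05, -0.75, 3.0])
    >>> t = np.linspace(0, 10, 1001)
    >>> w = sweep_poly(t, p)
    >>> w.shape
    (1001,)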
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
poryfly/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
joshgabriel/dft-crossfilter | CompleteApp/crossfilter_app/main.py | 1 | 25701 | import os
from os.path import dirname, join
from collections import OrderedDict
import pandas as pd
import numpy as np
import json
from bokeh.io import curdoc
from bokeh.layouts import row, widgetbox, column, gridplot, layout
from bokeh.models import Select, Div, Column, \
HoverTool, ColumnDataSource, Button, RadioButtonGroup,\
MultiSelect
#from bokeh.models.widgets import RangeSlider
from bokeh.plotting import figure
from bokeh.sampledata.periodic_table import elements
import requests
plottables = ['k-point', 'value', 'perc_precisions']
x_select = Select(title='X-Axis', value='k-point', options=plottables)
y_select = Select(title='Y-Axis', value='value', options=plottables)
############## Header Content from description.html #################
content_filename1 = join(dirname(__file__), "ptable.html")
description1 = Div(text=open(content_filename1).read(),
render_as_text=False, width=600)
content_filename2 = join(dirname(__file__), "HomePage.html")
description2 = Div(text=open(content_filename2).read(),
render_as_text=False, width=600)
# periodic table widget
romans = ["I", "II", "III", "IV", "V", "VI", "VII"]
elements["atomic mass"] = elements["atomic mass"].astype(str)
print("Table---")
#print(elements.period)
print("---Table")
try:
elements["period"] = [romans[x-1] for x in elements.period]
except:
pass
elements = elements[elements.group != "-"]
group_range = [str(x) for x in range(1, 19)]
colormap = {
"c" : "#ffa07a",
"nc" : "#A9A9A9"
}
elems_colorpair = {'H':'nc','He':'nc',
'Li':'nc','Be':'nc','B':'nc','C':'nc', 'N':'nc', 'O':'nc','F':'nc','Ne':'nc',
'Na':'nc','Mg':'nc', 'Al':'c','Si':'nc','P':'nc','S':'nc','Cl':'nc','Ar':'nc',
'K': 'nc', 'Ca':'nc','Sc':'c', 'Ti':'c' ,'V':'c' , 'Cr':'c', 'Mn':'c', 'Fe':'c', 'Co':'c', 'Ni':'c', 'Cu':'c', 'Zn':'c',
'Rb':'nc', 'Sr':'nc','Y':'c', 'Zr':'c', 'Nb':'c', 'Mo':'c', 'Tc':'c', 'Ru':'c', 'Rh':'c', 'Pd':'c', 'Ag':'c','Cd': 'c',
'Cs':'nc', 'Ba':'nc', 'Hf':'c', 'Ta':'c', 'W':'c', 'Re':'c', 'Os':'c', 'Ir':'c', 'Pt':'c', 'Au':'c', 'Hg':'c'
}
elems_colorpair.update( { key:'nc' for key in list(elements['symbol']) if key not in list(elems_colorpair.keys()) } )
print ([ colormap[elems_colorpair[x]] for x in elements['symbol'] ])
source = ColumnDataSource(
data=dict(
group=[str(x) for x in elements["group"]],
period=[str(y) for y in elements["period"]],
symx=[str(x)+":0.1" for x in elements["group"]],
numbery=[str(x)+":0.8" for x in elements["period"]],
massy=[str(x)+":0.15" for x in elements["period"]],
namey=[str(x)+":0.3" for x in elements["period"]],
sym=elements["symbol"],
name=elements["name"],
# cpk=elements["CPK"],
atomic_number=elements["atomic number"],
# electronic=elements["electronic configuration"],
# mass=elements["atomic mass"],
B=['B' for x in elements["atomic mass"]],
dB=['dB' for x in elements["atomic mass"]],
V0=['V0' for x in elements["atomic mass"]],
E0=['E0' for x in elements["atomic mass"]],
# type=elements["metal"],
type_color=[ colormap[elems_colorpair[x]] for x in elements['symbol'] ],
)
)
# plot the periodic layout
#name = source.data["name"]
#B = source.data["B"]
# Display Table
#ptable1 = figure(title="Periodic Table", tools="hover",
# x_range=group_range, y_range=list(reversed(romans)))
#ptable1.plot_width = 1500
#ptable1.toolbar_location = None
#ptable1.outline_line_color = None
#ptable1.background_fill_color = 'white'
#ptable1.rect("group", "period", 0.9, 0.9, source=source,
# fill_alpha=0.3, color='type_color')
text_props = {
"source": source,
"angle": 0,
"color": "black",
"text_align": "left",
"text_baseline": "middle"
}
#ptable1.text(x="symx", y="period", text="sym",
# text_font_style="bold", text_font_size="22pt", **text_props)
#ptable1.text(x="symx", y="numbery", text="atomic_number",
# text_font_size="9pt", **text_props)
#ptable1.grid.grid_line_color = None
#ptable1.select_one(HoverTool).tooltips = [
# ("name", "@name"),
# ("V0 (A^3 per atom)", "@V0"),
# ("B (GPa)", "@B"),
# ("dB/dP", "@dB")
#]
#Interactive table
ptable2 = figure(title="Periodic Table", tools="hover",
x_range=group_range, y_range=list(reversed(romans)))
ptable2.plot_width = 1500
ptable2.toolbar_location = None
ptable2.outline_line_color = None
ptable2.background_fill_color = 'white'
ptable2.rect("group", "period", 0.9, 0.9, source=source,
fill_alpha=0.3, color='type_color')
ptable2.text(x="symx", y="period", text="sym",
text_font_style="bold", text_font_size="22pt", **text_props)
ptable2.text(x="symx", y="numbery", text="atomic_number",
text_font_size="9pt", **text_props)
ptable2.grid.grid_line_color = None
ptable2.select_one(HoverTool).tooltips = [
("name", "@name"),
("V0 (A^3 per atom)", "@V0"),
("B (GPa)", "@B"),
("dB/dP", "@dB")
]
######### CREATES CROSSFILTER ##########################
# decide if all columns or crossfilter down to sub properties
#source_data = pd.DataFrame({})#ColumnDataSource(data=dict())
class CrossFiltDFs():
def __init__(self,query_dict={'code':'VASP','exchange':'PBE',\
'element':'Au','structure':'fcc','properties':'B'},plot_data=None):
self.query_dict = query_dict
self.plot_data = plot_data
def crossfilter_by_tag(self,df, tag):
"""
        A crossfilter that can recursively update the unique options
        in the UI based on prior selections.
        Returns the dataframe crossfiltered by tag, e.g. {'element': 'Ag'}
"""
col,spec= list(tag.items())[0]
return df[df[col]==spec]
def update_ptable(self):
"""
update the periodic table highlighted elements
"""
from bokeh.sampledata.periodic_table import elements
romans = ["I", "II", "III", "IV", "V", "VI", "VII"]
elements["atomic mass"] = elements["atomic mass"].astype(str)
elements["period"] = [x for x in elements.period]
elements = elements[elements.group != "-"]
group_range = [str(x) for x in range(1, 19)]
print ('reaches colormap def')
colormap = {
"c" : "#ffa07a",
"nc" : "#A9A9A9"
}
elems_colorpair = {}
fcc_B_extrapol_props = {}
fcc_dB_extrapol_props = {}
fcc_V0_extrapol_props = {}
fcc_E0_extrapol_props = {}
bcc_B_extrapol_props = {}
bcc_dB_extrapol_props = {}
bcc_V0_extrapol_props = {}
bcc_E0_extrapol_props = {}
hcp_B_extrapol_props = {}
hcp_dB_extrapol_props = {}
hcp_V0_extrapol_props = {}
hcp_E0_extrapol_props = {}
available_elems = []
for e in elements["symbol"]:
if e in np.unique(list(self.plot_data['element'])):
available_elems.append(e)
for s in np.unique(list(self.plot_data['structure'])):
plot_struct = self.plot_data[self.plot_data['structure']==s]
plot_struct_elem = plot_struct[plot_struct['element']==e]
if s=='fcc':
try:
fcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})
fcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})
fcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})
fcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})
except:
pass
elif s=='bcc':
try:
bcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})
bcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})
bcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})
bcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})
except:
pass
elif s=='hcp':
try:
hcp_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})
hcp_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})
hcp_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})
hcp_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})
except:
pass
fcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_E0_extrapol_props})
fcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_V0_extrapol_props})
fcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_B_extrapol_props})
fcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_dB_extrapol_props})
bcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_E0_extrapol_props})
bcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_V0_extrapol_props})
bcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_B_extrapol_props})
bcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_dB_extrapol_props})
hcp_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_E0_extrapol_props})
hcp_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_V0_extrapol_props})
hcp_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_B_extrapol_props})
hcp_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_dB_extrapol_props})
elems_colorpair.update( { key:'c' for key in np.unique(available_elems) } )
elems_colorpair.update( { key:'nc' for key in list(elements['symbol']) if key not in list(elems_colorpair.keys()) } )
print ([ colormap[elems_colorpair[x]] for x in elements['symbol'] ])
source = ColumnDataSource(
data=dict(
group=[str(x) for x in elements["group"]],
period=[str(y) for y in elements["period"]],
symx=[str(x)+":0.1" for x in elements["group"]],
numbery=[str(x)+":0.8" for x in elements["period"]],
massy=[str(x)+":0.15" for x in elements["period"]],
namey=[str(x)+":0.3" for x in elements["period"]],
sym=elements["symbol"],
name=elements["name"],
# cpk=elements["CPK"],
atomic_number=elements["atomic number"],
# electronic=elements["electronic configuration"],
fcc_B=[fcc_B_extrapol_props[x] for x in elements["symbol"]],
fcc_dB=[fcc_dB_extrapol_props[x] for x in elements["symbol"]],
fcc_V0=[fcc_V0_extrapol_props[x] for x in elements["symbol"]],
fcc_E0=[fcc_E0_extrapol_props[x] for x in elements["symbol"]],
bcc_B=[bcc_B_extrapol_props[x] for x in elements["symbol"]],
bcc_dB=[bcc_dB_extrapol_props[x] for x in elements["symbol"]],
bcc_V0=[bcc_V0_extrapol_props[x] for x in elements["symbol"]],
bcc_E0=[bcc_E0_extrapol_props[x] for x in elements["symbol"]],
hcp_B=[hcp_B_extrapol_props[x] for x in elements["symbol"]],
hcp_dB=[hcp_dB_extrapol_props[x] for x in elements["symbol"]],
hcp_V0=[hcp_V0_extrapol_props[x] for x in elements["symbol"]],
hcp_E0=[hcp_E0_extrapol_props[x] for x in elements["symbol"]],
type=elements["metal"],
type_color=[ colormap[elems_colorpair[x]] for x in elements['symbol'] ],
)
)
# plot the periodic layout
#name = source.data["name"]
#B = source.data["B"]
ptable = figure(title="Periodic Table", tools="hover",
x_range=group_range, y_range=list(reversed(romans)))
ptable.background_fill_color='white'
ptable.plot_width = 1500
ptable.toolbar_location = None
ptable.outline_line_color = None
ptable.rect("group", "period", 0.9, 0.9, source=source,
fill_alpha=0.3, color='type_color')
text_props = {
"source": source,
"angle": 0,
"color": "black",
"text_align": "left",
"text_baseline": "middle"
}
ptable.text(x="symx", y="period", text="sym",
text_font_style="bold", text_font_size="22pt", **text_props)
ptable.text(x="symx", y="numbery", text="atomic_number",
text_font_size="9pt", **text_props)
# ptable.text(x="symx", y="namey", text="name",
# text_font_size="6pt", **text_props)
# ptable.text(x="symx", y="massy", text="mass",
# text_font_size="5pt", **text_props)
ptable.grid.grid_line_color = None
ptable.select_one(HoverTool).tooltips = [
("name", "@name"),
("fcc, V0 (A^3 per atom)", "@fcc_V0"),
("fcc, B (GPa)", "@fcc_B"),
("fcc, dB/dP", "@fcc_dB"),
("bcc, V0 (A^3 per atom)", "@bcc_V0"),
("bcc, B (GPa)", "@bcc_B"),
("bcc, dB/dP", "@bcc_dB"),
("hcp, V0 (A^3 per atom)", "@hcp_V0"),
("hcp, B (GPa)", "@hcp_B"),
("hcp, dB/dP", "@hcp_dB")]
return ptable
def convert_multi_query_to_dicts(self,user_query):
"""
"""
for k in user_query:
self.query_api(endpoint='precvalue')
pass
def plot_prec_value(self):
print ('Triggering crossfilter')
print ('executes this on startup')
layout.children[6] = self.multi_2Dplot_pade_figure(self.plot_data)
def multi_2Dplot_pade_figure(self,datasets):
"""
method which plots multiple curves of different color
on the same bokeh figure canvas. Will receive query results from the precvalue
end point on the E0k, V0k, Bk, BPk, kpoints data. x is always kpoints data log scaled
Example user query is {'code':'VASP','exchange':'PBE','element':'Al','structure':'fcc','property':'B'} +
{'code':'VASP','exchange':'PBE','element':'Al','structure':'hcp','property':'B'} +
{'code':'DMol3','exchange':'LDA','element':'Al','structure':'fcc', 'property':'B'} +
{'code':'DMol3','exchange':'LDA','element':'Al','structure':'hcp', 'property':'B'}
"""
# receive a dict of datasets: {'Plot1':{'x':[],'y':[], 'x_title': None, 'y_title': None,
# 'Characteristic':'VASP_PBE_Al_fcc_B'}, 'Plot2':{'x':[],'y':[], 'x_title': None, 'y_title': None}}
def color_marker_divider(characteristics):
cm_keys= {'00':('red','*'),'01':('red','-.-'),'02':('red','*'),'03':('red','^'),\
'10':('blue','*'),'11':('blue','-.-'),'12':('blue','*'),'13':('blue','^')
}
DictCharacters = \
[{n:att for n,att in enumerate(c.split('_'))} for c in characteristics]
# one or two char value different and same code and exchange: same color different marker
# else different color and marker.
return cm_keys
kw = {}
kw['title'] = 'Pade Analysis Plots'
kw['x_axis_type'] = 'log'
self.p1 = figure(plot_height=600, plot_width=800, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)
        # Axis titles: take them from the first dataset entry when provided,
        # otherwise fall back to generic labels (the original code referenced
        # x_title/y_title here without defining them).
        first_dset = next(iter(datasets.values())) if datasets else {}
        self.p1.xaxis.axis_label = first_dset.get('x_title') or 'k-points per atom'
        self.p1.yaxis.axis_label = first_dset.get('y_title') or 'Pade prediction'
#color_marker_divider(characteristics)
for dset in datasets:
xs = datasets[dset]['x']
ys = datasets[dset]['y']
#c,m = color_marker_divider(characteristics)['00']
self.p1.scatter(x=xs, y=ys)#, alpha=1.0, hover_color='blue', hover_alpha=1.0)
return self.p1
def multi_precisions_correlate(self, datasets):
"""
method which allows the user to plot various precisions
against each other. Also prints out the M-value and intercepts
of precision at 1 meV/atom, 0.1 meV/atom and 0.01 meV/atom
"""
pass
def kpoints_interactive_selector(self, dataset):
"""
method which creates a pareto optimal plot for the chosen structure, material,
code and exchange and with the user input of desired precision returns
the kpoints per atom choice.
"""
pass
def pade_visualize(self,dataset):
"""
method which creates the Pade contour interpolation over the
raw evk data. Receives query result from the evk endpoint
"""
pass
def create_figure(self,dataset,datplot='Init',plot_type=None):
"""
figure and plot creation for a given dataset
TODO: enable support for multiple selection
refactor to a simple figure creator and
add helper functions for the plots
"""
kw = dict()
x_title = x_select.value.title() + ' Density per atom'
# hack for labels now
if isinstance(dataset,pd.DataFrame):
if np.unique(list(dataset['properties']))[0]=='B':
y_title = 'Bulk Modulus (GPa) '+y_select.value.title()
elif np.unique(list(dataset['properties']))[0]=='dB':
y_title = 'dB/dP '+y_select.value.title()
elif np.unique(list(dataset['properties']))[0]=='v0':
y_title = 'Volume per atom (A^3) '+y_select.value.title()
elif np.unique(list(dataset['properties']))[0]=='E0':
y_title = 'DFT Energy per atom (eV/atom) '+y_select.value.title()
else:
y_title = 'Pade Prediction'
kw['title'] = "%s vs %s" % (y_title, x_title)
#if x_select.value=='k-point':
kw['x_axis_type'] = 'log'
if x_select.value == 'perc_precisions' and y_select.value == 'perc_precisions':
kw['y_axis_type'] = 'log'
self.p = figure(plot_height=600, plot_width=800, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)
# sets the axes
self.p.xaxis.axis_label = x_title
self.p.yaxis.axis_label = y_title
#if x_select.value in continuous:
self.p.xaxis.major_label_orientation = pd.np.pi / 4
#print (dataset)
if datplot=='Init':
# if data is to be plotted
xs = [1,2,3,4]#dataset[x_select.value].values
ys = [1,2,3,4]#dataset[y_select.value].values
self.xs_init = xs
self.ys_init = ys
self.p.scatter(x=xs, y=ys)#, alpha=1.0, hover_color='blue', hover_alpha=1.0)
return self.p
elif datplot == 'Add':
# add a plot to figure, from statistical analysis
if plot_type == 'plot_pade':
#pade_order = self.analysis_results['Order']
#pade_extrapolate = self.analysis_results['Extrapolate']
#print (pade_extrapolate, float(pade_extrapolate))
# create precisions based on the extrapolate
#print (self.add_data)
xs = self.add_data[0]
ys = self.add_data[1]#[abs(y-pade_extrapolate) for y in self.ys_init]
#print (ys)
# print (xs,ys,len(xs),len(ys))
print ("Plots a line supposedly")
#print (len(self.ys_init), len(ys))
#l = min([len(self.ys_init), len(ys), len(self.xs_init),len(xs)])
#self.plot_layout.scatter(x=self.xs_init[0:l], y=self.ys_init[0:l])#, alpha=1.0, hover_color='blue', hover_alpha=1.0)
#print (type(self.plot_layout))
#self.p.self.plot
                self.p = figure(plot_height=600, plot_width=800, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)
print('executes till re-figure')
self.p.circle(x=self.xs_init,y=self.ys_init)
print('executes till circle')
self.p.line(x=xs, y=ys, line_color='red')
#self.p.line_color='red'
print('executes till line')
return self.p
else:
# clear the figure by plotting an empty figure
xs = []
ys = []
self.p = figure(plot_height=600, plot_width=800, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)
self.p.scatter(x=xs, y=ys)#, alpha=1.0, hover_color='blue', hover_alpha=1.0)
return self.p
# The crossfilter widgets
def update(self, attr, old, new):
print ('Attribute', attr, 'OLD', old, 'NEW', new)
print ('executes here on update')#, exchange_df)
def update_code(self):
"""
update for the code selection
"""
print ('update code')
self.query_dict.update({'code':code.value})
def update_exchange(self):
"""
update the exchange
"""
print ('update exchange')
self.query_dict.update({'exchange':exchange.value})
def update_element(self,new):
print ('Updating element down selection for properties',element.active[0])
self.query_dict.update({'element':element.active[0]})
def update_struct(self):
print ('Updating struct down selection for element')
self.query_dict.update({'structure':struct.value})
print ('Updating ptable with structure selection')
layout.children[1] = self.update_ptable()
print ('finished callback to update layout')
def update_prop(self):
self.query_dict.update({'properties':prop.value})
def update_kpoints(self):
pass
def update_x(self):
self.x = x.value
pass
def update_y(self):
self.y = y.value
pass
def query_api(self,endpoint):
r = requests.post(url='http://0.0.0.0:6400/bench/v1/query_{}'.\
format(endpoint),data=json.dumps(self.query_dict))
ListOfDicts = r.json()['content']
self.plot_data = pd.concat([pd.DataFrame({k:[ld[k]] for k in list(ld.keys())}) for ld in ListOfDicts])
def clear_crossfilter(self):
"""
clear the figure and crossfilter
"""
print ('Trigger clear')
self.plot_data = None
layout.children[6] = self.create_figure(self.plot_data)
def analysis_callback(self):
"""
calls the Pade analysis on the current plot data
TODO:
NOTE: check if this is a data set that is a single scatter
FEATUREs that could be added: plot the Pade for multiple selections
"""
print ('called Pade analysis')
# writes out the crossfiltered plot data on the server
crossfilt = self.plot_data[['k-point','value']]
crossfilt.columns=['Kpt','P']
crossfilt.to_csv('crossfilter_app/Rdata.csv')
print ('wrote out data file')
os.system('Rscript crossfilter_app/non_err_weighted_nls.R')
self.analysis_results = pd.read_csv('crossfilter_app/Result.csv')
#self.add_data = [ list(self.xs_init), list(self.predict_results['Preds....c.predict.m2..']) ]
ext_values = list(self.analysis_results['Extrapolate....extrapolates'])
error_values = list(self.analysis_results['Error....errors'])
self.ext_min_error = ext_values[error_values.index(min(error_values))]
print ('executed R script on crossfiltered data')
if error_values.index(min(error_values))==0:
self.predict_results = pd.read_csv('crossfilter_app/Pade1.csv')
self.add_data = [list(self.predict_results['Px....x_plot']), list(self.predict_results['Py....pade1.x_plot.'])]
elif error_values.index(min(error_values))==1:
self.predict_results = pd.read_csv('crossfilter_app/Pade2.csv')
self.add_data = [list(self.predict_results['Px....x_plot']), list(self.predict_results['Py....pade2.x_plot.'])]
print ('ADD DATA', self.add_data)
layout.children[4] = self.create_figure(self.add_data, datplot='Add', plot_type='plot_pade')
def update():
pass
#source_data = CF.plot_data
CF = CrossFiltDFs(query_dict={'code':'VASP','exchange':'PBE'})
# first query for the periodic table data
CF.query_api(endpoint='extrapolate')
print (CF.plot_data)
# for the first table to display VASP PBE all structures Pade extrapolates for all properties
# as a bonus with some error bar too
ptable1 = CF.update_ptable()
layout_doc = column(description1, ptable1, description2)
#layout_doc = layout([description1],\
# [ptable1],\
# [description2],\
# row([element,code,exchange,struct]),\
# row([element,code,exchange,struct]),\
# row([element,code,exchange,struct]),\
# row([element,code,exchange,struct]),\
# sizing_mode='stretch_both'
# )
#column(description1, ptable1, description2, controls1, ptable2, controls2)
print ('executed till here')
curdoc().add_root(layout_doc)
curdoc().title = "DFT Benchmark"
update()
| mit |
wzbozon/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
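# --- Hedged usage sketch (added for illustration; not part of scikit-learn).
# It exercises the "string" input_type described in the class docstring: each
# token contributes an implied value of 1, and token signs may be flipped by
# the hash unless ``non_negative=True`` is requested.
def _demo_string_hashing():
    hasher = FeatureHasher(n_features=8, input_type="string")
    X = hasher.transform([["dog", "cat", "cat"], ["run"]])
    return X.toarray()            # dense (2, 8) array of hashed token counts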
| bsd-3-clause |
nguyentu1602/statsmodels | statsmodels/examples/try_polytrend.py | 33 | 1477 |
from __future__ import print_function
import numpy as np
#import statsmodels.linear_model.regression as smreg
from scipy import special
import statsmodels.api as sm
from statsmodels.datasets.macrodata import data
dta = data.load()
gdp = np.log(dta.data['realgdp'])
from numpy import polynomial
from scipy import special
maxorder = 20
polybase = special.chebyt
polybase = special.legendre
t = np.linspace(-1,1,len(gdp))
exog = np.column_stack([polybase(i)(t) for i in range(maxorder)])
fitted = [sm.OLS(gdp, exog[:, :maxr]).fit().fittedvalues for maxr in
range(2,maxorder)]
print((np.corrcoef(exog[:,1:6], rowvar=0)*10000).astype(int))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(gdp, 'o')
for i in range(maxorder-2):
plt.plot(fitted[i])
plt.figure()
#plt.plot(gdp, 'o')
for i in range(maxorder-4, maxorder-2):
#plt.figure()
plt.plot(gdp - fitted[i])
plt.title(str(i+2))
plt.figure()
plt.plot(gdp, '.')
plt.plot(fitted[-1], lw=2, color='r')
plt.plot(fitted[0], lw=2, color='g')
plt.title('GDP and Polynomial Trend')
plt.figure()
plt.plot(gdp - fitted[-1], lw=2, color='r')
plt.plot(gdp - fitted[0], lw=2, color='g')
plt.title('Residual GDP minus Polynomial Trend (green: linear, red: legendre(20))')
#orthonormalize an exog using QR
ex2 = t[:,None]**np.arange(6) #np.vander has columns reversed
q2,r2 = np.linalg.qr(ex2, mode='full')
np.max(np.abs(np.dot(q2.T, q2)-np.eye(6)))
plt.figure()
plt.plot(q2, lw=2)
plt.show()
| bsd-3-clause |
vermouthmjl/scikit-learn | examples/linear_model/plot_iris_logistic.py | 3 | 1679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of a logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam | PyFoam/Basics/XkcdMatplotlibTimelines.py | 2 | 1338 | # ICE Revision: $Id$
"""Plots a collection of timelines"""
from PyFoam.Error import warning,error
from .MatplotlibTimelines import MatplotlibTimelines
class XkcdMatplotlibTimelines(MatplotlibTimelines):
"""This class opens a matplotlib window, modifies it to XKCD-mode and plots a timelines-collection in it"""
def __init__(self,
timelines,
custom,
showWindow=True,
registry=None):
"""@param timelines: The timelines object
@type timelines: TimeLineCollection
@param custom: A CustomplotInfo-object. Values in this object usually override the
other options
"""
MatplotlibTimelines.__init__(self,
timelines,
custom,
showWindow=showWindow,
registry=registry
)
from matplotlib import pyplot
try:
pyplot.xkcd()
except AttributeError:
from matplotlib import __version__
warning("Installed version",__version__,
" of Matplotlib does not support XKCD-mode (this is supported starting with version 1.3). Falling back to normal operations")
# Should work with Python3 and Python2
| gpl-2.0 |
mindriot101/bokeh | bokeh/core/json_encoder.py | 3 | 9053 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide functions and classes to implement a custom JSON encoder for
serializing objects for BokehJS.
The primary interface is provided by the |serialize_json| function, which
uses the custom |BokehJSONEncoder| to produce JSON output.
In general, functions in this module convert values in the following way:
* Datetime values (Python, Pandas, NumPy) are converted to floating point
milliseconds since epoch.
* TimeDelta values are converted to absolute floating point milliseconds.
* RelativeDelta values are converted to dictionaries.
* Decimal values are converted to floating point.
* Sequences (Pandas Series, NumPy arrays, python sequences) that are passed
though this interface are converted to lists. Note, however, that arrays in
data sources inside Bokeh Documents are converted elsewhere, and by default
use a binary encoded format.
* Bokeh ``Model`` instances are usually serialized elsewhere in the context
of an entire Bokeh Document. Models passed trough this interface are
converted to references.
* ``HasProps`` (that are not Bokeh models) are converted to key/value dicts of
all their properties and values.
* ``Color`` instances are converted to CSS color values.
.. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json`
.. |BokehJSONEncoder| replace:: :class:`~bokeh.core.json_encoder.BokehJSONEncoder`
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import collections
import decimal
import json
# External imports
import numpy as np
# Bokeh imports
from ..settings import settings
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_type, convert_timedelta_type, is_datetime_type, is_timedelta_type, transform_series, transform_array
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
rd = import_optional("dateutil.relativedelta")
pd = import_optional('pandas')
__all__ = (
'BokehJSONEncoder',
'serialize_json',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def serialize_json(obj, pretty=None, indent=None, **kwargs):
''' Return a serialized JSON representation of objects, suitable to
send to BokehJS.
This function is typically used to serialize single python objects in
the manner expected by BokehJS. In particular, many datetime values are
automatically normalized to an expected format. Some Bokeh objects can
also be passed, but note that Bokeh models are typically properly
serialized in the context of an entire Bokeh document.
    The resulting JSON always has sorted keys. By default, the output is
as compact as possible unless pretty output or indentation is requested.
Args:
obj (obj) : the object to serialize to JSON format
pretty (bool, optional) :
Whether to generate prettified output. If ``True``, spaces are
            added after separators, and indentation and newlines
are applied. (default: False)
Pretty output can also be enabled with the environment variable
``BOKEH_PRETTY``, which overrides this argument, if set.
indent (int or None, optional) :
Amount of indentation to use in generated JSON output. If ``None``
then no indentation is used, unless pretty output is enabled,
in which case two spaces are used. (default: None)
Any additional keyword arguments are passed to ``json.dumps``, except for
some that are computed internally, and cannot be overridden:
* allow_nan
* indent
* separators
* sort_keys
Examples:
.. code-block:: python
>>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3))
            >>> print(serialize_json(data))
{"a":[0,1,2],"b":1483228800000.0}
>>> print(serialize_json(data, pretty=True))
{
"a": [
0,
1,
2
],
"b": 1483228800000.0
}
'''
# these args to json.dumps are computed internally and should not be passed along
for name in ['allow_nan', 'separators', 'sort_keys']:
if name in kwargs:
            raise ValueError("The value of %r is computed internally, overriding is not permissible." % name)
if pretty is None:
pretty = settings.pretty(False)
if pretty:
separators=(",", ": ")
else:
separators=(",", ":")
if pretty and indent is None:
indent = 2
return json.dumps(obj, cls=BokehJSONEncoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class BokehJSONEncoder(json.JSONEncoder):
''' A custom ``json.JSONEncoder`` subclass for encoding objects in
accordance with the BokehJS protocol.
'''
def transform_python_types(self, obj):
''' Handle special scalars such as (Python, NumPy, or Pandas)
datetimes, or Decimal values.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
# date/time values that get serialized as milliseconds
if is_datetime_type(obj):
return convert_datetime_type(obj)
if is_timedelta_type(obj):
return convert_timedelta_type(obj)
# slice objects
elif isinstance(obj, slice):
return dict(start=obj.start, stop=obj.stop, step=obj.step)
# NumPy scalars
elif np.issubdtype(type(obj), np.floating):
return float(obj)
elif np.issubdtype(type(obj), np.integer):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Decimal values
elif isinstance(obj, decimal.Decimal):
return float(obj)
# RelativeDelta gets serialized as a dict
elif rd and isinstance(obj, rd.relativedelta):
return dict(years=obj.years,
months=obj.months,
days=obj.days,
hours=obj.hours,
minutes=obj.minutes,
seconds=obj.seconds,
microseconds=obj.microseconds)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
''' The required ``default`` method for JSONEncoder subclasses.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
from ..model import Model
from ..colors import Color
from .has_props import HasProps
# array types -- use force_list here, only binary
# encoding CDS columns for now
if pd and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj, force_list=True)
elif isinstance(obj, np.ndarray):
return transform_array(obj, force_list=True)
elif isinstance(obj, collections.deque):
return list(map(self.default, obj))
elif isinstance(obj, Model):
return obj.ref
elif isinstance(obj, HasProps):
return obj.properties_with_values(include_defaults=False)
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
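# --- Hedged usage sketch (added for illustration; not part of bokeh).  It
# exercises a few of the conversions listed in the module docstring through
# ``serialize_json``.
def _demo_serialize_json():
    import datetime
    import numpy as np
    payload = {
        "a": np.arange(3),                 # ndarray -> JSON list
        "b": np.datetime64("2017-01-01"),  # datetime -> ms since epoch
        "c": slice(0, 10, 2),              # slice -> {"start", "stop", "step"}
        "d": datetime.timedelta(hours=1),  # timedelta -> absolute ms
    }
    return serialize_json(payload)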
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
meerkat-code/meerkat_api | api_background/api_background/export_data.py | 1 | 40520 | """
Functions to export data
"""
import gettext
import shelve
import csv
import json
import logging
import xlsxwriter
import os
import yaml
from sqlalchemy.orm import aliased
from sqlalchemy import text, or_, func, Float
from dateutil.parser import parse
from datetime import datetime
from celery import task
import requests
import pandas
from api_background._populate_locations import set_empty_locations, populate_row_locations
from api_background.xls_csv_writer import XlsCsvFileWriter
from meerkat_abacus import config
from meerkat_abacus.model import DownloadDataFiles, AggregationVariables
from meerkat_abacus.model import form_tables, Data, Links
from meerkat_abacus.util import all_location_data, get_db_engine, get_links
from meerkat_abacus.util import get_locations, is_child
from meerkat_abacus.util.epi_week import epi_week_for_date
from api_background.celery_app import app
import meerkat_libs
base_folder = os.path.dirname(os.path.realpath(__file__))
@app.task
def export_data(uuid, allowed_location, use_loc_ids=False, param_config_yaml=yaml.dump(config)):
"""
Exports the data table from db
    Inserts the finished file into the database
Args:
uuid: uuid for download
        use_loc_ids: If we use location ids instead of names
"""
db, session = get_db_engine()
status = DownloadDataFiles(
uuid=uuid,
generation_time=datetime.now(),
type="data",
success=0,
status=0
)
session.add(status)
session.commit()
results = session.query(
func.distinct(
func.jsonb_object_keys(Data.variables)))
variables = []
for row in results:
variables.append(row[0])
locs = get_locations(session)
fieldnames = ["id", "zone", "country", "region",
"district", "clinic", "zone_id", "country_id", "region_id",
"district_id", "clinic_id", "clinic_type",
"geolocation", "date", "uuid"] + list(variables)
dict_rows = []
filename = base_folder + "/exported_data/" + uuid + "/data"
os.mkdir(base_folder + "/exported_data/" + uuid)
output = open(filename + ".csv", "w")
writer = csv.DictWriter(output, fieldnames, extrasaction="ignore")
writer.writeheader()
results = session.query(Data).yield_per(500)
i = 0
for row in results:
dict_row = dict(
(col, getattr(row, col)) for col in row.__table__.columns.keys()
)
for l in ["country", "zone", "region", "district", "clinic"]:
if dict_row[l]:
dict_row[l + "_id"] = dict_row[l]
dict_row[l] = locs[dict_row[l]].name
dict_row.update(dict_row.pop("variables"))
dict_rows.append(dict_row)
if i % 1000 == 0:
writer.writerows(dict_rows)
dict_rows = []
i += 1
writer.writerows(dict_rows)
status.status = 1
status.success = 1
session.commit()
return True
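# --- Hedged illustration (added; not part of the original module).  A made-up
# example of the ``variables`` argument accepted by ``export_category`` below:
# each entry is [form field / special command, column header].  The field
# names are hypothetical and only illustrate the command syntax described in
# the docstring.
_EXAMPLE_EXPORT_VARIABLES = [
    ["SubmissionDate$date", "Submission Date"],
    ["clinic", "Clinic"],
    ["icd_name$gender", "Gender"],
    ["visit_type$translate;{'new': 'N', 'return': 'R'}", "Visit Type"],
    ["end$epi_week", "Epi Week"],
]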
@app.task
def export_category(uuid, form_name, category, download_name,
variables, data_type, allowed_location,
start_date=None, end_date=None, language="en",
param_config_yaml=yaml.dump(config)):
"""
We take a variable dictionary of form field name: display_name.
There are some special commands that can be given in the form field name:
* icd_name$category will translate an icd code in icd_code to names given
by the variables in category
* clinic,region and district will give this location information
* the $translate keyword can be used to translate row values to other ones.
I.e to change gender from male, female to M, F
* field$month, field$year, field$epi_week: will extract the month, year
or epi_week from the field
    * alert_links$alert_investigation$field: will get the field in the
      corresponding alert_investigation
    Inserts the resulting csv file into the database
Args:\n
category: category to match\n
variables: variable dictionary\n
"""
# Runner loads the config object through a function parameter.
param_config = yaml.load(param_config_yaml)
country_config = param_config.country_config
config_directory = param_config.config_directory
# Some strings in download data need to be translated
translation_dir = country_config.get("translation_dir", None)
t = get_translator(param_config, language)
db, session = get_db_engine()
db2, session2 = get_db_engine()
status = DownloadDataFiles(
uuid=uuid,
generation_time=datetime.now(),
type=download_name,
success=0,
status=0
)
session.add(status)
session.commit()
res = session.query(AggregationVariables).filter(
AggregationVariables.category.has_key(category)
)
locs = get_locations(session)
data_keys = []
cat_variables = {}
for r in res:
data_keys.append(r.id)
cat_variables[r.id] = r
    if len(data_keys) == 0:
        status.status = 1
        session.commit()
        return False
return_keys = []
translation_dict = {}
icd_code_to_name = {}
link_ids = []
min_translation = {}
def add_translations_from_file(details):
# Load the csv file and reader
file_path = '{}api/{}'.format(config_directory, details['dict_file'])
csv_file = open(file_path, 'rt')
reader = csv.reader(csv_file)
# Establish which column in each row we're translating from and to.
headers = next(reader)
from_index = headers.index(details['from'])
to_index = headers.index(details['to'])
# Add translations to the translation dictionary.
trans_dict = {}
for row in reader:
trans_dict[row[from_index]] = row[to_index]
return trans_dict
# DB conditions
conditions = [
or_(Data.variables.has_key(key) for key in data_keys)
]
if data_type:
conditions.append(Data.type == data_type)
if start_date:
conditions.append(Data.date >= parse(start_date))
if end_date:
conditions.append(Data.date <= parse(end_date))
# Set up icd_code_to_name if needed and determine if
# alert_links are included
query_links = False
to_columns_translations = {}
for v in variables:
if "every$" in v[0]:
# Want to include all the fields in the dictionary
# in v[1] for all the links in the name
# First determine the maximum number of links
link_name = v[0].split("$")[1]
length_q = session.query(
func.max(func.jsonb_array_length(Data.links[link_name]))).filter(
*conditions)
length = length_q.first()[0]
for i in range(length):
for variable in v[1]:
name = link_name + "_" + str(i) + " " + variable[1]
return_keys.append(name)
translation_dict[name] = "many_links&" + link_name + "&" + str(i) + "&" + variable[0]
query_links = link_name
else:
return_keys.append(v[1])
translation_dict[v[1]] = v[0]
if "icd_name$" in v[0]:
category = v[0].split("$")[-1]
cat_variables = {}
res = session.query(AggregationVariables).filter(
AggregationVariables.category.has_key(category)
)
for r in res:
cat_variables.setdefault(r.id, [])
cat_variables[r.id].append(r)
icd_code_to_name[v[0]] = {}
for i in cat_variables.keys():
for var in cat_variables[i]:
condition = var.condition
if ";" in condition:
condition = condition.split(";")[0]
if "," in condition:
# If a variable have many icd codes
# we take all of them into account
codes = condition.split(",")
else:
codes = [condition]
for c in codes:
if c:
icd_code_to_name[v[0]][c.strip()] = var.name
if "$translate" in v[0]:
split = v[0].split("$")
field = "$".join(split[:-1])
trans = split[-1]
tr_dict = json.loads(trans.split(";")[1].replace("'", '"'))
# If the json specifies file details, load translation from file.
if tr_dict.get('dict_file', False):
min_translation[v[1]] = add_translations_from_file(tr_dict)
else:
min_translation[v[1]] = tr_dict
v[0] = field
translation_dict[v[1]] = v[0]
if "$to_columns" in v[0]:
# Create columns of every possible value
split = v[0].split("$")
field = "$".join(split[:-1])
trans = split[-1]
tr_dict = {}
if ";" in trans:
tr_dict = json.loads(trans.split(";")[1].replace("'", '"'))
# If the json specifies file details, load translation from file.
# Get all possible options from the DB
results = session2.query(
func.distinct(
func.regexp_split_to_table(
form_tables(param_config)[form_name].data[field].astext, ' '))).join(
Data,
Data.uuid == form_tables(param_config)[form_name].uuid).filter(
*conditions).all()
if tr_dict.get('dict_file', False):
translations = add_translations_from_file(tr_dict)
else:
translations = {}
return_keys.pop()
for r in results:
if r[0]:
name = v[1] + " " + translations.get(r[0], r[0])
if name not in return_keys:
return_keys.append(name)
if name in translation_dict:
translation_dict[name] = translation_dict[name] + "," + r[0]
else:
translation_dict[name] = field + "$to_columns$" + r[0]
if "gen_link$" in v[0]:
link_ids.append(v[0].split("$")[1])
if "uuid" not in return_keys:
return_keys.append("uuid")
translation_dict["uuid"] = "meta/instanceID"
link_ids = set(link_ids)
links_by_type, links_by_name = get_links(config_directory +
country_config["links_file"])
# DB query, with yield_per(200) for memory reasons
columns = [Data, form_tables(param_config)[form_name]]
link_id_index = {}
joins = []
if query_links:
link_data = shelve.open(base_folder + "/exported_data/" + uuid)
link_data_query = session.query(Links).filter(Links.type == link_name).yield_per(300)
for row in link_data_query:
link_data[row.uuid_to] = row.data_to
for i, l in enumerate(link_ids):
form = aliased(form_tables(param_config)[links_by_name[l]["to_form"]])
joins.append((form, Data.links[(l, -1)].astext == form.uuid))
link_id_index[l] = i + 2
columns.append(form.data)
number_query = session2.query(func.count(Data.id)).join(
form_tables(param_config)[form_name], Data.uuid == form_tables(param_config)[form_name].uuid)
results = session2.query(*columns).join(
form_tables(param_config)[form_name], Data.uuid == form_tables(param_config)[form_name].uuid)
for join in joins:
results = results.outerjoin(join[0], join[1])
total_number = number_query.filter(*conditions).first()[0]
results = results.filter(*conditions).yield_per(200)
locs = get_locations(session)
list_rows = []
filename = base_folder + "/exported_data/" + uuid + "/" + download_name
os.mkdir(base_folder + "/exported_data/" + uuid)
csv_content = open(filename + ".csv", "w")
csv_writer = csv.writer(csv_content)
csv_writer.writerows([return_keys])
# XlsxWriter with "constant_memory" set to true, flushes mem after each row
xls_content = open(filename + ".xlsx", "wb")
xls_book = xlsxwriter.Workbook(xls_content, {'constant_memory': True})
xls_sheet = xls_book.add_worksheet()
# xls_sheet = pyexcel.Sheet([keys])
    # Little utility function that writes a row to the given xlsx sheet.
    def write_xls_row(data, row, sheet):
        for cell in range(len(data)):
            sheet.write(row, cell, data[cell])
write_xls_row(return_keys, 0, xls_sheet)
i = 0
def _list_category_variables(category, data_row):
"""
Lists the variables from the specified category that are assigned to
the specified row. This can be used to create data columns such as
'Age Group' using 'category$ncd_age'.
"""
# Get the category's variables' data, indexed by ID.
cat_variables = {}
variable_list = ""
db_results = session.query(AggregationVariables).filter(
AggregationVariables.category.has_key(category)
)
for variable in db_results:
cat_variables[variable.id] = variable
# Build a string listing the row's variables from specified category.
for var_id, var in cat_variables.items():
            if var_id in data_row[0].variables:
variable_list += var.name + ", "
# Remove the last comma and space.
return variable_list[:-2]
# Prepare each row
for r in results:
list_row = [''] * len(return_keys)
if not is_child(allowed_location, r[0].clinic, locs):
continue
dates = {}
for k in return_keys:
form_var = translation_dict[k]
index = return_keys.index(k)
raw_data = r[1].data
if "many_links&" in form_var:
link_name, number, form_var = form_var.split("&")[1:]
number = int(number)
if link_name in r[0].links:
links = r[0].links[link_name]
if len(links) >= number + 1:
link_uuid = links[number]
raw_data = link_data[link_uuid]
else:
list_row[index] = None
continue
else:
list_row[index] = None
continue
if "icd_name$" in form_var:
fields = form_var.split("$")
if len(fields) > 2:
field = fields[1]
else:
field = "icd_code"
if raw_data[field] in icd_code_to_name[form_var]:
list_row[index] = icd_code_to_name[form_var][raw_data[
field]]
else:
list_row[index] = None
elif form_var == "clinic":
list_row[index] = locs[r[0].clinic].name
elif form_var == "region":
list_row[index] = locs[r[0].region].name
elif form_var == "zone":
list_row[index] = locs[r[0].zone].name
elif form_var == "district":
if r[0].district:
list_row[index] = locs[r[0].district].name
else:
list_row[index] = None
elif "$year" in form_var:
field = form_var.split("$")[0]
if field in raw_data and raw_data[field]:
if field not in dates:
dates[field] = parse(raw_data[field])
list_row[index] = dates[field].year
else:
list_row[index] = None
elif "$month" in form_var:
field = form_var.split("$")[0]
if field in raw_data and raw_data[field]:
if field not in dates:
dates[field] = parse(raw_data[field])
list_row[index] = dates[field].month
else:
list_row[index] = None
elif "$day" in form_var:
field = form_var.split("$")[0]
if field in raw_data and raw_data[field]:
if field not in dates:
dates[field] = parse(raw_data[field])
list_row[index] = dates[field].day
else:
list_row[index] = None
elif "$quarter" in form_var:
field = form_var.split("$")[0]
if raw_data.get(field):
if field not in dates:
dates[field] = parse(raw_data[field])
quarter = 1 + (dates[field].month - 1)//3
list_row[index] = quarter
else:
list_row[index] = None
elif "$epi_week" in form_var:
field = form_var.split("$")[0]
if field in raw_data and raw_data[field]:
if field not in dates:
dates[field] = parse(raw_data[field])
list_row[index] = epi_week_for_date(dates[field])[1]
else:
list_row[index] = None
# A general framework for referencing links in the
# download data.
# link$<link id>$<linked form field>
elif "gen_link$" in form_var:
link = form_var.split("$")[1]
link_index = link_id_index[link]
if r[link_index]:
list_row[index] = r[link_index].get(
form_var.split("$")[2],
None
)
else:
list_row[index] = None
elif "code" == form_var.split("$")[0]:
# code$cod_1,cod_2,Text_1,Text_2$default_value
split = form_var.split("$")
codes = split[1].split(",")
text = split[2].split(",")
if len(split) > 3:
default_value = split[3]
else:
default_value = None
final_text = []
for j in range(len(codes)):
if codes[j] in r[0].variables:
final_text.append(text[j])
if len(final_text) > 0:
list_row[index] = " ".join(final_text)
else:
list_row[index] = default_value
elif "category" == form_var.split("$")[0]:
list_row[index] = _list_category_variables(
form_var.split("$")[1],
r
)
elif "code_value" == form_var.split("$")[0]:
code = form_var.split("$")[1]
if code in r[0].variables:
list_row[index] = float(r[0].variables[code])
else:
list_row[index] = None
elif "value" == form_var.split(":")[0]:
list_row[index] = form_var.split(":")[1]
elif "$to_columns$" in form_var:
int_has_code = 0
field = form_var.split("$")[0]
codes = form_var.split("$")[-1].split(",")
str_elements = raw_data.get(field)
if type(str_elements) == str:
elements = str_elements.split(" ")
has_code = any(code in elements for code in codes)
int_has_code = int(has_code)
list_row[index] = int_has_code
else:
if form_var.split("$")[0] in raw_data:
list_row[index] = raw_data[form_var.split("$")[0]]
else:
list_row[index] = None
# Standardise date formating
if "$date" in form_var:
field = form_var.split("$")[0]
if list_row[index]:
if field not in dates:
dates[field] = parse(list_row[index])
list_row[index] = dates[field].strftime(
"%d/%m/%Y"
)
else:
list_row[index] = None
# If the final value is a float, round to 2 dp.
# This proceedure ensures integers are shown as integers.
# Also accepts string values.
try:
a = float(list_row[index])
b = int(float(list_row[index]))
if a == b:
list_row[index] = b
else:
list_row[index] = round(a, 2)
except (ValueError, TypeError):
pass
# If a translation dictionary is defined in which the key exists...
if min_translation and k in min_translation and list_row[index]:
tr_dict = min_translation[k]
if list_row[index] in tr_dict:
list_row[index] = tr_dict[list_row[index]]
else:
parts = [x.strip() for x in str(list_row[index]).split(' ')]
for x in range(len(parts)):
# Get the translation using the appropriate key.
# If that doesn't exist get the wild card key: *
# If that doesn't exist just return the value
parts[x] = str(
tr_dict.get(parts[x], tr_dict.get('*', parts[x]))
)
list_row[index] = ' '.join(list(filter(bool, parts)))
if translation_dir and language != "en" and list_row[index]:
list_row[index] = t.gettext(list_row[index])
list_rows.append(list_row)
# Can write row immediately to xls file as memory is flushed after.
write_xls_row(list_row, i + 1, xls_sheet)
# Append the row to list of rows to be written to csv.
if i % 1000 == 0:
logging.warning("{} rows completed...".format(i))
csv_writer.writerows(list_rows)
list_rows = []
status.status = i / total_number
session.commit()
i += 1
csv_writer.writerows(list_rows)
csv_content.close()
xls_book.close()
xls_content.close()
status.status = 1
status.success = 1
session.commit()
if query_links:
link_data.close()
dir_path = os.path.dirname(os.path.realpath(__file__))
filename = dir_path + "/exported_data/" + uuid
logging.warning("Filename: " + filename)
if os.path.exists(filename + ".dir"):
os.remove(filename + ".dir")
if os.path.exists(filename + ".dat"):
os.remove(filename + ".dat")
return True
def construct_completeness_call(variable_config, sublevel, start_date, end_date):
"""
Construct the correct completeness calls based on the dates
Args:\n
variable_config: The base api call
sublevel: The level to aggregate over
start_date: Start date
end_date: End date
"""
api_calls = []
for year in range(start_date.year, end_date.year + 1):
year_start_week = 1
year_end_date_str = "{}-12-31".format(year)
if year == start_date.year:
year_start_week = epi_week_for_date(start_date)[1]
if year_start_week > 52:
year_start_week = 1
if year == end_date.year:
year_end_date_str = end_date.isoformat()
api_call = variable_config.split(":")[1]
api_call = api_call.replace("<start_week>", str(year_start_week))
api_call = api_call.replace("<end_date>", str(year_end_date_str))
api_call += "?sublevel={}".format(sublevel)
api_calls.append((api_call, year, year_start_week))
return api_calls
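# Hedged usage sketch for construct_completeness_call (the call template and
# dates are hypothetical; only the placeholder substitution is taken from the
# code above). For
#   variable_config = "completeness:some_call/<start_week>/<end_date>"
# with a date range spanning 2016-2017 and sublevel="clinic", the result is one
# entry per year, roughly:
#   [("some_call/<w>/2016-12-31?sublevel=clinic", 2016, <w>),
#    ("some_call/1/2017-06-30T00:00:00?sublevel=clinic", 2017, 1)]
# where <w> is the epi week of the start date.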
def _export_week_level_completeness(uuid, download_name, level,
completeness_config, translator, param_config,
start_date=None, end_date=None,
wide_data_format=False):
"""
Exports completeness data by location and week (and year).
Args:\n
uuid: uuid for the download process
download_name: Name of download file
level: level of location
completeness_config: Specifies the completeness call we want to make
translator: Translator
param_config: param config
start_date: The date to start the data set
end_date: End date for the aggregation
wide_data_format: If true the data is returned in the wide format, else in long format
"""
db, session = get_db_engine()
locs = get_locations(session)
operation_status = OperationStatus(download_name, uuid)
if start_date:
start_date = parse(start_date).replace(tzinfo=None)
if end_date:
end_date = parse(end_date).replace(tzinfo=None)
completeness_calls = construct_completeness_call(completeness_config[0],
level,
start_date,
end_date)
jwt_auth_token = meerkat_libs.authenticate(
username=param_config.server_auth_username,
password=param_config.server_auth_password,
auth_root=param_config.auth_root)
if not jwt_auth_token:
raise AttributeError("Not sucessfully logged in for api access")
headers = {'content-type': 'application/json',
'authorization': 'Bearer {}'.format(jwt_auth_token)}
data = []
year_label = translator.gettext("Year")
location_label = translator.gettext(level.title())
week_label = translator.gettext("Week")
district_label = translator.gettext("District")
completeness_config_label = translator.gettext(completeness_config[1])
for call, year, start_week in completeness_calls:
api_result = requests.get(param_config.api_root + call, headers=headers)
timeline = api_result.json()["timeline"]
max_per_week = int(call.split("/")[4]) # Extract the maximum number from api call
for location in timeline:
loc_id = int(location)
for week in range(len(timeline[location]["weeks"])):
week_start_day = datetime.strptime(timeline[location]["weeks"][week], '%Y-%m-%dT%H:%M:%S')
data.append({year_label: year,
location_label: locs[loc_id].name,
week_label: epi_week_for_date(week_start_day)[1],
completeness_config_label: timeline[location]["values"][week] / max_per_week * 100
})
if level == "clinic" and loc_id != 1:
data[-1][district_label] = locs[locs[loc_id].parent_location].name
filename = base_folder + "/exported_data/" + uuid + "/" + download_name
os.mkdir(base_folder + "/exported_data/" + uuid)
df = pandas.DataFrame(data)
if wide_data_format:
if level == "clinic":
index_labels = [year_label, district_label, location_label, week_label]
else:
index_labels = [year_label, location_label, week_label]
df = df.set_index(index_labels).unstack()
df.to_csv(filename + ".csv")
df.to_excel(filename + ".xlsx")
operation_status.submit_operation_success()
def get_translator(param_config, language):
translation_dir = param_config.country_config.get("translation_dir", None)
if translation_dir:
try:
t = gettext.translation('messages', translation_dir, languages=["en", "fr"])
except (FileNotFoundError, OSError):
logging.warning("Translations not found", exc_info=True)
t = gettext.NullTranslations()
else:
t = gettext.NullTranslations()
if language != "en":
os.environ["LANGUAGE"] = language
return t
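# Hedged usage sketch for get_translator (config/language values are
# hypothetical): the return value is a gettext translations object, so callers
# translate labels with .gettext(), e.g.
#   translator = get_translator(param_config, "fr")
#   translator.gettext("Week")  # translated label, or "Week" with NullTranslations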
@app.task
def export_week_level(uuid, download_name, level,
variable_config, start_date=None, end_date=None,
wide_data_format=False, language="en",
param_config_yaml=yaml.dump(config)):
"""
Export aggregated data by location and week (and year).
Args:\n
uuid: uuid for the download process
download_name: Name of download file
level: level of location
variable_config: the variable we want to aggregate
start_date: The date to start the data set
end_date: End date for the aggregation
wide_data_format: If true the data is returned in the wide format, else in long format
param_config: The configuration values
"""
param_config = yaml.load(param_config_yaml)
translator = get_translator(param_config, language)
if "completeness" in variable_config[0]:
_export_week_level_completeness(uuid, download_name, level,
variable_config, translator,
param_config, start_date=start_date,
end_date=end_date, wide_data_format=wide_data_format)
else:
_export_week_level_variable(uuid, download_name, level,
variable_config, translator,
start_date=start_date, end_date=end_date,
wide_data_format=wide_data_format,
param_config_yaml=param_config_yaml)
def _export_week_level_variable(uuid, download_name, level,
variable_config, translator,
start_date=None, end_date=None,
wide_data_format=False,
param_config_yaml=yaml.dump(config)):
"""
Export aggregated data by location and week (and year).
Args:\n
uuid: uuid for the download process
download_name: Name of download file
level: level of location
variable_config: the variable we want to aggregate. Consists of the restrict_by, variable to aggregate and the display name
start_date: The date to start the data set
end_date: End date for the aggregation
wide_data_format: If true the data is returned in the wide format, else in long format
param_config: The configuration values
"""
restrict_by, variable, display_name = variable_config
if level == "clinic":
group_by = [["epi_year", translator.gettext("Year")],
["district:location", translator.gettext("District")],
[level + ":location", translator.gettext(level.title())],
["epi_week", translator.gettext("Week")]
]
else:
group_by = [["epi_year", translator.gettext("Year")],
[level + ":location", translator.gettext(level.title())],
["epi_week", translator.gettext("Week")]
]
return export_data_table(uuid,
download_name,
restrict_by,
[[variable, display_name]],
group_by,
start_date=start_date,
end_date=end_date,
wide_data_format=wide_data_format,
param_config_yaml=param_config_yaml)
@app.task
def export_data_table(uuid, download_name,
restrict_by, variables, group_by,
location_conditions=None,
start_date=None, end_date=None,
wide_data_format=False,
param_config_yaml=yaml.dump(config)):
"""
Export an aggregated data table restricted by restrict_by.
Args:\n
uuid: uuid for the download process
download_name: Name of download file
restrict_by: variable used to restrict which rows are included in the aggregation
variables: the variables we want to aggregate
group_by: The data to group by (clinic, epi_week)
start_date: The date to start the data set
end_date: End date for the aggregation
wide_data_format: If true the data is returned in the wide format, else in long format
param_config: The configuration values
"""
return_keys = []
db, session = get_db_engine()
locs = get_locations(session)
list_rows = []
operation_status = OperationStatus(download_name, uuid)
level = "region"
columns = []
groups = []
location_subs = []
only_latest_from_clinic_in_week = False
if "only_latest_from_clinic_in_week:" in restrict_by:
restrict_by_variable = restrict_by.split(":")[1]
only_latest_from_clinic_in_week = True
else:
restrict_by_variable = restrict_by
for i, v in enumerate(group_by):
field = v[0]
if ":location" in field:
field_column = field.split(":")[0]
level = field_column
location_subs.append(i)
else:
field_column = field
columns.append(getattr(Data, field_column))
groups.append(getattr(Data, field_column))
return_keys.append(v[1])
conditions = [Data.variables.has_key(restrict_by_variable)]
if start_date:
start_date = parse(start_date).replace(tzinfo=None)
conditions.append(Data.date >= start_date)
if end_date:
end_date = parse(end_date).replace(tzinfo=None)
conditions.append(Data.date <= end_date)
for v in variables:
if only_latest_from_clinic_in_week:
columns.append(Data.variables[v[0]].astext.cast(Float))
else:
columns.append(func.sum(Data.variables[v[0]].astext.cast(Float)))
return_keys.append(v[1])
if only_latest_from_clinic_in_week:
conditions.append(Data.variables.has_key(restrict_by_variable))
result = session.query(*columns).distinct(Data.clinic).filter(*conditions).order_by(Data.clinic).order_by(Data.date.desc())
else:
result = session.query(*columns).filter(*conditions).group_by(*groups)
filename = base_folder + "/exported_data/" + uuid + "/" + download_name
os.mkdir(base_folder + "/exported_data/" + uuid)
i = 0
for row in result:
row_list = list(row)
location_condition = True
for l in location_subs:
if row_list[l]:
if location_conditions:
tmp = getattr(locs[row_list[l]], location_conditions[0][0])
if location_conditions[0][1] in tmp:
location_condition = False
row_list[l] = locs[row_list[l]].name
if location_condition:
row_list = [x if x is not None else 0 for x in row_list]
list_rows.append(row_list)
i += 1
df = pandas.DataFrame(list_rows, columns=return_keys)
if wide_data_format:
df = df.set_index(return_keys[:-len(variables)]).unstack().fillna(0)
df.to_csv(filename + ".csv")
df.to_excel(filename + ".xlsx")
operation_status.submit_operation_success()
return True
@app.task
def export_form(uuid, form, allowed_location, fields=None, param_config_yaml=yaml.dump(config)):
"""
Export a form. If fields is in the request variable we only include
those fields.
Starts background export
Args:\n
uuid: uuid of download\n
form: the form to export\n
allowed_location: will extract result only for this location
fields: Fields from form to export\n
Returns:\n
bool: The return value. True for success, False otherwise.\n
"""
# Runner loads the config object through a function parameter.
param_config = yaml.load(param_config_yaml)
db, session = get_db_engine()
operation_status = OperationStatus(form, uuid)
if form not in form_tables(param_config):
operation_status.submit_operation_failure()
return False
location_data = all_location_data(session)
locs_by_deviceid = location_data[1]
if locs_by_deviceid is None:
operation_status.submit_operation_failure()
return False
if fields:
keys = fields
else:
keys = __get_keys_from_db(db, form, param_config)
xls_csv_writer = XlsCsvFileWriter(base_folder, form, uuid)
xls_csv_writer.write_xls_row(keys)
xls_csv_writer.write_csv_row(keys)
query_form_data = session.query(form_tables(param_config)[form].data)
__save_form_data(xls_csv_writer, query_form_data, operation_status, keys, allowed_location, location_data)
operation_status.submit_operation_success()
xls_csv_writer.flush_csv_buffer()
xls_csv_writer.close_cvs_xls_buffers()
return True
def __get_keys_from_db(db, form, param_config=config):
keys = ["clinic", "region", "district"]
sql = text(f"SELECT DISTINCT(jsonb_object_keys(data)) from {form_tables(param_config)[form].__tablename__}")
results = db.execute(sql)
for r in results:
keys.append(r[0])
return keys
class OperationStatus:
def __init__(self, form, uuid):
self.db, self.session = get_db_engine()
self.__initialize(form, uuid)
def __initialize(self, form, uuid):
self.download_data_file = DownloadDataFiles(uuid=uuid,
generation_time=datetime.now(),
type=form,
success=0,
status=0.0)
self.session.add(self.download_data_file)
self.session.commit()
def update_operation_status(self, status):
self.download_data_file.status = status
self.download_data_file.success = 0
self.session.commit()
def submit_operation_success(self):
self.download_data_file.status = 1.0
self.download_data_file.success = 1
self.session.commit()
def submit_operation_failure(self):
self.download_data_file.status = 1.0
self.download_data_file.success = 0
self.session.commit()
def __save_form_data(xls_csv_writer, query_form_data, operation_status, keys, allowed_location, location_data):
(locations, locs_by_deviceid, zones, regions, districts, devices) = location_data
results = query_form_data.yield_per(1000)
results_count = query_form_data.count()
for i, result in enumerate(results):
if not result:
logging.error("Skipping result %d which is None", i)
continue
if not result.data:
logging.error("Skipping result %d. Data is None", i)
continue
if not isinstance(result.data, dict):
logging.error("Skipping result %d which data is not of a dictionary type", i)
continue
# Initialise empty result for header line
row = []
for key in keys:
try:
row.append(result.data.get(key, ''))
except AttributeError:
logging.exception("Error while parsing row %s with data:\n%s", result, result.data, exc_info=True)
# Add the location data if it has been requested and exists.
if 'deviceid' in result.data:
clinic_id = locs_by_deviceid.get(result.data["deviceid"], None)
if not is_child(allowed_location, clinic_id, locations):
continue
populate_row_locations(row, keys, clinic_id, location_data)
else:
if allowed_location != 1:
continue
set_empty_locations(keys, row)
xls_csv_writer.write_xls_row(row)
xls_csv_writer.write_csv_row(row)
five_percent_progress = i % (results_count / 20) == 0
if five_percent_progress:
new_status = float(i) / results_count
operation_status.update_operation_status(new_status)
if __name__ == "__main__":
import uuid
export_data_table(
str(uuid.uuid4()), "test", "reg_1", [["reg_2", "Consultations"]],
[["epi_year", "year"], ["clinic:location", "clinic"], ["epi_week", "week"]],
location_conditions=[["case_type", "SARI"]]
)
| mit |
JeanKossaifi/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
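# (kneighbors_graph is not symmetric on its own because k-nearest-neighbour
# relations are one-directional; averaging it with its transpose produces the
# symmetric connectivity matrix used by the agglomerative estimators below.)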
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
lfairchild/PmagPy | pmagpy_tests/test_imports3.py | 1 | 65257 | #!/usr/bin/env python
import unittest
import os
#import sys
from pmagpy import pmag
from pmagpy import contribution_builder as cb
from pmagpy import convert_2_magic as convert
WD = pmag.get_test_WD()
class Test2g_bin_magic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
#input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
#'IODP_jr6_magic')
#files = ['test.magic', 'other_er_samples.txt']
files = ['mn001-1a.magic', 'samples.txt', 'sites.txt',
'measurements.txt', 'locations.txt', 'specimens.txt']
pmag.remove_files(files, WD)
pmag.remove_files(['custom_specimens.txt', 'samples.txt',
'sites.txt', 'locations.txt'], 'data_files')
pmag.remove_files(files, os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1'))
os.chdir(WD)
def test_2g_with_no_files(self):
options = {}
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, 'mag file is required input')
def test_2g_with_files(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
program_ran, outfile = convert._2g_bin(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.split(outfile)[1], 'measurements.txt')
self.assertTrue(os.path.isfile(outfile))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_2g_fail_option4(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '4'
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, 'option [4] must be in form 4-Z where Z is an integer')
def test_2g_succeed_option4(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '4-3'
program_ran, outfile = convert._2g_bin(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.split(outfile)[1], 'measurements.txt')
def test_2g_fail_option7(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '7'
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, 'option [7] must be in form 7-Z where Z is an integer')
def test_2g_succeed_option7(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '7-3'
program_ran, outfile = convert._2g_bin(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.split(outfile)[1], 'measurements.txt')
def test_2g_fail_option6(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '6'
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, 'Naming convention option [6] not currently supported')
def test_2g_with_bad_file(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1ax.dat'
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, "bad mag file")
def test_2g_with_options(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['meas_file'] = 'mn001-1a.magic'
options['samp_con'] = '4-3'
options['inst'] = 'instrument'
options['noave'] = 0
options['specnum'] = 2
options['location'] = 'location'
options['or_con'] = '4'
options['gmeths'] = 'FS-LOC-MAP:SO-POM'
program_ran, outfile = convert._2g_bin(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.split(outfile)[1], 'mn001-1a.magic')
def test_2g_with_path(self):
options = {}
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
#options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
# '2g_bin_magic', 'mn1')
options['mag_file'] = os.path.join(input_dir, 'mn001-1a.dat')
options['meas_file'] = os.path.join(input_dir, 'mn001-1a.magic')
options['spec_file'] = os.path.join('data_files', 'custom_specimens.txt')
options['dir_path'] = 'data_files'
program_ran, outfile = convert._2g_bin(**options)
self.assertEqual(outfile, options['meas_file'])
self.assertTrue(os.path.exists(options['meas_file']))
self.assertTrue(os.path.exists(os.path.join('data_files', 'sites.txt')))
class TestAgmMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt',
'agm_magic_example.magic', 'agm_magic_example_locations.txt',
'agm_magic_example_specimens.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_success(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'agm_magic')
program_ran, filename = convert.agm('agm_magic_example.agm',
meas_outfile='agm_magic_example.magic',
input_dir_path=input_dir, fmt="old")
self.assertTrue(program_ran)
def test_backfield_success(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'agm_magic')
program_ran, filename = convert.agm('agm_magic_example.irm',
meas_outfile='agm_magic_example.magic',
input_dir_path=input_dir, fmt="old", bak=True,
instrument="SIO-FLO")
class TestBgcMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'bgc_magic')
def tearDown(self):
filelist = ['96MT.05.01.magic', 'BC0-3A.magic',
'measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt']
pmag.remove_files(filelist, self.input_dir)
filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
'locations.txt', 'custom_specimens.txt', 'measurements.txt']
pmag.remove_files(filelist, WD)
pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_bgc_with_no_files(self):
with self.assertRaises(TypeError):
convert.bgc()
def test_bgc_success(self):
options = {'input_dir_path': self.input_dir, 'mag_file': '96MT.05.01'}
program_ran, outfile = convert.bgc(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_bgc_with_path(self):
options = {}
options['mag_file'] = os.path.join(self.input_dir, '96MT.05.01')
options['spec_file'] = os.path.join(WD, 'custom_specimens.txt')
options['dir_path'] = 'data_files'
program_ran, outfile = convert.bgc(**options)
self.assertEqual(outfile, os.path.join(WD, 'data_files', 'measurements.txt'))
self.assertTrue(os.path.isfile(options['spec_file']))
self.assertTrue(os.path.isfile(os.path.join(WD, 'data_files', 'samples.txt')))
def test_bgc_alternate_infile(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'BC0-3A'}
program_ran, outfile = convert.bgc(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'measurements.txt'))
def test_bgc_with_append(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'BC0-3A'}
program_ran, outfile = convert.bgc(**options)
self.assertTrue(program_ran)
options['append'] = True
program_ran, outfile = convert.bgc(**options)
self.assertTrue(program_ran)
lines, file_type = pmag.magic_read(os.path.join(WD, 'specimens.txt'))
self.assertEqual(len(lines), 2)
class TestCitMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
#loc_file = 'custom_locations.txt'
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'custom_locations.txt']
dir_path = os.path.join(WD, 'data_files')
pmag.remove_files(filelist, dir_path)
samp_file = 'custom_samples.txt'
dir_path = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
pmag.remove_files([samp_file], dir_path)
os.chdir(WD)
def test_cit_with_no_files(self):
program_ran, error_message = convert.cit()
self.assertFalse(program_ran)
self.assertEqual(error_message, 'bad sam file name')
def test_cit_magic_with_file(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_cit_magic_with_path(self):
options = {}
#options['input_dir_path'] = os.path.join(WD, 'data_files',
# 'convert_2_magic',
#                                          'cit_magic', 'PI47')
options['magfile'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47', 'PI47-.sam')
options['loc_file'] = 'custom_locations.txt'
options['samp_file'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47', 'custom_samples.txt')
options['dir_path'] = os.path.join(WD, 'data_files')
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
for fname in [os.path.join(WD, 'data_files', options['loc_file']),
options['samp_file'],
os.path.join(WD, 'data_files', 'specimens.txt')]:
self.assertTrue(os.path.isfile(fname))
def test_cit_magic_fail_option4(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
options['samp_con'] = '4'
program_ran, error_message = convert.cit(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, "naming convention option [4] must be in form 4-Z where Z is an integer")
def test_cit_magic_succeed_option4(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
options['samp_con'] = '4-3'
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
def test_cit_magic_with_options(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
options['samp_con'] = '2'
options['methods'] = ['SO-SM:SO-MAG']
options['locname'] = 'location'
options['noave'] = 1
options['specnum'] = 2
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
def test_cit_magic_with_other_data(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
options['samp_con'] = '1'
options['methods'] = ['SO-SM:SO-MAG']
options['locname'] = 'location'
options['noave'] = 1
options['specnum'] = 2
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
class TestGenericMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['generic_magic_example.magic']
directory = os.path.join(WD, 'data_files', 'convert_2_magic',
'generic_magic')
pmag.remove_files(filelist, directory)
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_generic_magic_no_exp(self):
dir_path = os.path.join('data_files', 'convert_2_magic',
'generic_magic')
options = {}
options['magfile'] = os.path.join(dir_path, 'generic_magic_example.txt')
options['meas_file'] = os.path.join(dir_path, 'generic_magic_example.magic')
program_ran, error_message = convert.generic(**options)
self.assertFalse(program_ran)
no_exp_error = "Must provide experiment. Please provide experiment type of: Demag, PI, ATRM n (n of positions), CR (see help for format), NLT"
self.assertEqual(no_exp_error, error_message)
def test_generic_magic_success(self):
dir_path = os.path.join('data_files', 'convert_2_magic',
'generic_magic')
options = {}
options['magfile'] = os.path.join(dir_path, 'generic_magic_example.txt')
options['meas_file'] = os.path.join(dir_path, 'generic_magic_example.magic')
options['experiment'] = 'Demag'
program_ran, outfile_name = convert.generic(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile_name), os.path.realpath(options['meas_file']))
class TestHujiMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['Massada_AF_HUJI_new_format.magic']
directory = os.path.join(WD, 'data_files', 'convert_2_magic',
'huji_magic')
pmag.remove_files(filelist, directory)
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt',
'Massada_AF_HUJI_new_format.magic']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_with_bad_file(self):
program_ran, error_msg = convert.huji()
self.assertFalse(program_ran)
self.assertEqual(error_msg, "mag_file field is a required option")
program_ran, error_msg = convert.huji("fake")
self.assertFalse(program_ran)
self.assertEqual(error_msg, "bad mag file name")
def test_huji_magic_success(self):
dir_path = os.path.join('data_files', 'convert_2_magic',
'huji_magic')
full_file = os.path.join(dir_path, "Massada_AF_HUJI_new_format.txt")
options = {}
options['input_dir_path'] = dir_path
options['magfile'] = "Massada_AF_HUJI_new_format.txt"
options['meas_file'] = "Massada_AF_HUJI_new_format.magic"
options['codelist'] = 'AF'
program_ran, outfile = convert.huji(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, options['meas_file'])
def test_with_options(self):
dir_path = os.path.join('data_files', 'convert_2_magic',
'huji_magic')
options = {}
options['dir_path'] = dir_path
options['magfile'] = "Massada_AF_HUJI_new_format.txt"
options['meas_file'] = "Massada_AF_HUJI_new_format.magic"
options['codelist'] = "AF"
options['location'] = "Massada"
options['noave'] = True
options['user'] = "me"
options['labfield'] = 40
options['phi'] = 0
options['theta'] = 90
program_ran, outfile = convert.huji(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, options['meas_file'])
def test_with_no_exp_type(self):
dir_path = os.path.join('data_files', 'convert_2_magic', 'huji_magic')
mag_file = "Massada_AF_HUJI_new_format.txt"
res, error = convert.huji(mag_file, dir_path)
self.assertFalse(res)
self.assertEqual(error, "Must select experiment type (codelist/-LP, options are: [AF, T, ANI, TRM, CR])")
class TestHujiSampleMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['samples.txt', 'sites.txt']
directory = os.path.join(WD, 'data_files', 'convert_2_magic',
'huji_magic')
pmag.remove_files(filelist, directory)
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt',
'Massada_AF_HUJI_new_format.magic']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_success(self):
res, outfile = convert.huji_sample("magdelkrum_datafile.txt",
dir_path=os.path.join(WD, 'data_files', 'convert_2_magic', 'huji_magic'))
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(WD, 'data_files', 'convert_2_magic', 'huji_magic', 'samples.txt'))
class TestIodpSrmMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt', 'samples.txt',
'sites.txt', 'locations.txt',
'IODP_LIMS_SRMsection_366_U1494.csv.magic',
'IODP_LIMS_SRMsection_366_U1494_locations.txt',
'IODP_LIMS_SRMsection_366_U1494_samples.txt',
'IODP_LIMS_SRMsection_366_U1494_sites.txt',
'IODP_LIMS_SRMsection_366_U1494_specimens.txt']
dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
#directory = os.path.join(WD)
pmag.remove_files(filelist, dir_path)
dir_path = os.path.join(WD, 'data_files', 'convert_2_magic', 'iodp_srm_magic')
pmag.remove_files(filelist, dir_path)
dir_path = WD
pmag.remove_files(filelist, dir_path)
os.chdir(WD)
def test_iodp_with_no_files(self):
program_ran, error_message = convert.iodp_srm()
self.assertFalse(program_ran)
self.assertEqual(error_message, 'No .csv files were found')
#@unittest.skip("iodp_srm_magic is missing an example datafile")
def test_iodp_with_files(self):
options = {}
dir_path = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_srm_magic')
options['dir_path'] = dir_path
files = os.listdir(dir_path)
files = ['IODP_Janus_312_U1256.csv', 'SRM_318_U1359_B_A.csv' ] # this one takes way too long: IODP_LIMS_SRMsection_344_1414A.csv
info = []
for f in files:
if f.endswith('csv') and 'summary' not in f and 'discrete' not in f and 'sample' not in f:
options['csv_file'] = f
program_ran, outfile = convert.iodp_srm(**options)
meas_df = cb.MagicDataFrame(pmag.resolve_file_name(outfile, dir_path))
self.assertTrue(len(meas_df.df) > 0)
#@unittest.skip("iodp_srm_magic is missing an example datafile")
def test_iodp_with_one_file(self):
options = {}
#dir_path = os.path.join(WD, 'data_files', 'convert_2_magic',
# 'iodp_srm_magic')
dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
options['dir_path'] = dir_path
options['input_dir_path'] = dir_path
options['csv_file'] = 'srmsection-XXX-UTEST-A.csv'
program_ran, outfile = convert.iodp_srm(**options)
self.assertEqual(program_ran, True)
self.assertEqual(outfile, os.path.join('measurements.txt'))
meas_df = cb.MagicDataFrame(os.path.join(dir_path, outfile))
self.assertIn('sequence', meas_df.df.columns)
def test_iodp_with_one_file_with_path(self):
options = {}
dir_path = os.path.join('data_files', 'UTESTA', 'SRM_data')
#options['dir_path'] = dir_path
options['dir_path'] = WD #dir_path
options['input_dir_path'] = "fake/path"
options['csv_file'] = os.path.join(dir_path, 'srmsection-XXX-UTEST-A.csv')
program_ran, outfile = convert.iodp_srm(**options)
self.assertEqual(program_ran, True)
self.assertEqual(outfile, os.path.join('measurements.txt'))
class TestIodpDscrMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt', 'samples.txt',
'sites.txt', 'locations.txt', 'custom_samples.txt']
#directory = os.path.join(WD)
pmag.remove_files(filelist, WD)
pmag.remove_files(['custom_measurements.txt'], os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_iodp_with_no_files(self):
program_ran, error_message = convert.iodp_dscr()
self.assertFalse(program_ran)
self.assertEqual(error_message, 'No .csv files were found')
#@unittest.skip("iodp_srm_magic is missing an example datafile")
def test_iodp_with_one_file(self):
options = {}
#dir_path = os.path.join(WD, 'data_files', 'convert_2_magic',
#'iodp_srm_magic')
dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
options['input_dir_path'] = dir_path
options['csv_file'] = 'srmdiscrete-XXX-UTEST-A.csv'
program_ran, outfile = convert.iodp_dscr(**options)
self.assertEqual(program_ran, True)
self.assertEqual(outfile, 'measurements.txt')
def test_iodp_with_path(self):
options = {}
#dir_path = os.path.join(WD, 'data_files', 'convert_2_magic',
#'iodp_srm_magic')
dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
#options['input_dir_path'] = dir_path
options['csv_file'] = os.path.join('data_files', 'UTESTA', 'SRM_data', 'srmdiscrete-XXX-UTEST-A.csv')
options['meas_file'] = os.path.join(WD, 'data_files', 'custom_measurements.txt')
options['samp_file'] = 'custom_samples.txt'
program_ran, outfile = convert.iodp_dscr(**options)
self.assertEqual(program_ran, True)
self.assertEqual(outfile, os.path.join(WD, 'data_files', 'custom_measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
class TestIodpJr6Magic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
files = ['test.magic', 'other_er_samples.txt',
'custom_locations.txt', 'samples.txt', 'sites.txt',
'locations.txt', 'measurements.txt', 'specimens.txt']
pmag.remove_files(files, WD)
# then, make sure that hidden_er_samples.txt has been successfully renamed to er_samples.txt
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_jr6_magic')
hidden_sampfile = os.path.join(input_dir, 'hidden_er_samples.txt')
sampfile = os.path.join(input_dir, 'er_samples.txt')
if os.path.exists(hidden_sampfile):
os.rename(hidden_sampfile, sampfile)
pmag.remove_files(['custom_specimens.txt'], 'data_files')
os.chdir(WD)
def test_iodp_jr6_with_no_files(self):
with self.assertRaises(TypeError):
convert.iodp_jr6()
def test_iodp_jr6_with_invalid_mag_file(self):
options = {'mag_file': 'fake'}
program_ran, error_message = convert.iodp_jr6(**options)
expected_msg = 'The input file you provided: {} does not exist.\nMake sure you have specified the correct filename AND correct input directory name.'.format(os.path.realpath(os.path.join('.', 'fake')))
self.assertFalse(program_ran)
self.assertEqual(error_message, expected_msg)
#@unittest.skipIf('win32' in sys.platform or 'win62' in sys.platform, "Requires up to date version of pandas")
def test_iodp_jr6_with_magfile(self):
options = {}
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_jr6_magic')
options['input_dir_path'] = input_dir
mag_file = 'test.jr6'
options['mag_file'] = 'test.jr6'
meas_file = 'test.magic'
options['meas_file'] = meas_file
program_ran, outfile = convert.iodp_jr6(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, meas_file)
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_iodp_jr6_with_path(self):
options = {}
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_jr6_magic')
#options['input_dir_path'] = input_dir
mag_file = os.path.join('data_files', 'convert_2_magic', 'iodp_jr6_magic', 'test.jr6')
options['mag_file'] = mag_file #'test.jr6'
options['spec_file'] = os.path.join('data_files', 'custom_specimens.txt')
options['loc_file'] = 'custom_locations.txt'
meas_file = 'test.magic'
options['meas_file'] = meas_file
program_ran, outfile = convert.iodp_jr6(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, meas_file)
for fname in [options['loc_file'], options['spec_file']]:
self.assertTrue(os.path.isfile(fname))
#@unittest.skipIf('win32' in sys.platform or 'win62' in sys.platform, "Requires up to date version of pandas")
def test_iodp_jr6_with_options(self):
options = {}
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_jr6_magic')
options['input_dir_path'] = input_dir
mag_file = 'test.jr6'
options['mag_file'] = 'test.jr6'
meas_file = 'test.magic'
options['meas_file'] = meas_file
options['noave'] = 1
options['lat'] = 3
options['lon'] = 5
options['volume'] = 3
program_ran, outfile = convert.iodp_jr6(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, meas_file)
class TestIodpSamplesMagic(unittest.TestCase):
def setUp(self):
self.input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_srm_magic')
def tearDown(self):
os.chdir(WD)
filelist = ['er_samples.txt']
pmag.remove_files(filelist, WD)
def test_with_wrong_format(self):
infile = os.path.join(self.input_dir, 'GCR_U1359_B_coresummary.csv')
program_ran, error_message = convert.iodp_samples(infile)
self.assertFalse(program_ran)
expected_error = 'Could not extract the necessary data from your input file.\nPlease make sure you are providing a correctly formated IODP samples csv file.'
self.assertEqual(error_message, expected_error)
def test_with_right_format(self):
reference_file = os.path.join(WD, 'testing', 'odp_magic',
'odp_magic_er_samples.txt')
infile = os.path.join(self.input_dir, 'samples_318_U1359_B.csv')
program_ran, outfile = convert.iodp_samples(infile, data_model_num=2)
self.assertTrue(program_ran)
expected_file = os.path.join('.', 'er_samples.txt')
self.assertEqual(outfile, expected_file)
self.assertTrue(os.path.isfile(outfile))
def test_content_with_right_format(self):
reference_file = os.path.join(WD, 'data_files', 'testing',
'odp_magic', 'odp_magic_er_samples.txt')
infile = os.path.join(self.input_dir, 'samples_318_U1359_B.csv')
program_ran, outfile = convert.iodp_samples(infile, data_model_num=2)
with open(reference_file) as ref_file:
ref_lines = ref_file.readlines()
with open(outfile) as out_file:
out_lines = out_file.readlines()
self.assertTrue(program_ran)
self.assertEqual(ref_lines, out_lines)
def test_with_data_model3(self):
infile = os.path.join(self.input_dir, 'samples_318_U1359_B.csv')
program_ran, outfile = convert.iodp_samples(infile, data_model_num=3)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath('samples.txt'), os.path.realpath(outfile))
class TestJr6TxtMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
files = ['test.magic', 'other_er_samples.txt',
'custom_locations.txt', 'samples.txt', 'sites.txt',
'measurements.txt', 'locations.txt', 'specimens.txt']
pmag.remove_files(files, WD)
def test_success(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'jr6_magic')
output = convert.jr6_txt(**{'mag_file': 'AP12.txt', 'input_dir_path': input_dir})
self.assertTrue(output[0])
self.assertEqual(output[1], 'measurements.txt')
def test_with_options(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'jr6_magic')
options = {'mag_file': 'AP12.txt', 'input_dir_path': input_dir}
options['meas_file'] = "test.magic"
options['lat'] = 1
options['lon'] = 2
options['noave'] = True
output = convert.jr6_txt(**options)
self.assertTrue(output[0])
self.assertEqual(output[1], 'test.magic')
site_df = cb.MagicDataFrame(os.path.join(WD, 'sites.txt'))
self.assertEqual(1, site_df.df.lat.values[0])
class TestJr6Jr6Magic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
files = ['test.magic', 'other_er_samples.txt',
'custom_locations.txt', 'samples.txt', 'sites.txt',
'measurements.txt', 'locations.txt', 'specimens.txt']
pmag.remove_files(files, WD)
def test_success(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'jr6_magic')
output = convert.jr6_jr6(**{'mag_file': 'AF.jr6', 'input_dir_path': input_dir})
self.assertTrue(output[0])
self.assertEqual(os.path.realpath(output[1]), os.path.realpath('measurements.txt'))
def test_with_options(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'jr6_magic')
options = {'mag_file': 'SML07.JR6', 'input_dir_path': input_dir}
options['meas_file'] = "test.magic"
options['lat'] = 1
options['lon'] = 2
options['noave'] = True
output = convert.jr6_jr6(**options)
self.assertTrue(output[0])
self.assertEqual(os.path.realpath(output[1]), os.path.realpath('test.magic'))
site_df = cb.MagicDataFrame(os.path.join(WD, 'sites.txt'))
self.assertEqual(1, site_df.df.lat.values[0])
class TestKly4sMagic(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
filelist= ['magic_measurements.txt', 'my_magic_measurements.txt', 'er_specimens.txt', 'er_samples.txt', 'er_sites.txt', 'rmag_anisotropy.txt', 'my_rmag_anisotropy.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_kly4s_without_infile(self):
with self.assertRaises(TypeError):
convert.kly4s()
def test_kly4s_with_invalid_infile(self):
program_ran, error_message = convert.kly4s('hello.txt')
expected_file = os.path.join('.', 'hello.txt')
self.assertFalse(program_ran)
self.assertEqual(error_message, 'Error opening file: {}'.format(expected_file))
def test_kly4s_with_valid_infile(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, outfile = convert.kly4s('KLY4S_magic_example.dat', dir_path=WD,
input_dir_path=in_dir, data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'magic_measurements.txt'))
def test_kly4s_fail_option4(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, error_message = convert.kly4s('KLY4S_magic_example.dat', samp_con="4",
dir_path=WD, input_dir_path=in_dir,
data_model_num=2)
self.assertFalse(program_ran)
self.assertEqual(error_message, "option [4] must be in form 4-Z where Z is an integer")
def test_kly4s_succeed_option4(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, outfile = convert.kly4s('KLY4S_magic_example.dat', samp_con="4-2",
dir_path=WD, input_dir_path=in_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'magic_measurements.txt'))
self.assertTrue(os.path.isfile(os.path.join(WD, 'magic_measurements.txt')))
def test_kly4s_with_options(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, outfile = convert.kly4s('KLY4S_magic_example.dat', specnum=1,
locname="location", inst="instrument",
samp_con=3, or_con=2,
measfile='my_magic_measurements.txt',
aniso_outfile="my_rmag_anisotropy.txt",
dir_path=WD, input_dir_path=in_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'my_magic_measurements.txt'))
self.assertTrue(os.path.isfile(os.path.join(WD, 'my_rmag_anisotropy.txt')))
def test_kly4s_with_valid_infile_data_model3(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, outfile = convert.kly4s('KLY4S_magic_example.dat', dir_path=WD,
input_dir_path=in_dir, data_model_num=3)
con = cb.Contribution(WD)
self.assertEqual(['measurements', 'samples', 'sites', 'specimens'], sorted(con.tables))
class TestK15Magic(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
filelist = ['magic_measurements.txt', 'my_magic_measurements.txt',
'er_specimens.txt', 'er_samples.txt', 'my_er_samples.txt',
'er_sites.txt', 'rmag_anisotropy.txt',
'my_rmag_anisotropy.txt', 'rmag_results.txt',
'my_rmag_results.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_k15_with_files(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'k15_magic')
program_ran, outfile = convert.k15('k15_example.dat',
input_dir_path=input_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.realpath('magic_measurements.txt'))
def test_k15_fail_option4(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'k15_magic')
program_ran, error_message = convert.k15('k15_example.dat',
sample_naming_con="4",
input_dir_path=input_dir,
data_model_num=2)
self.assertFalse(program_ran)
self.assertEqual(error_message, "option [4] must be in form 4-Z where Z is an integer")
def test_k15_succeed_option4(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'k15_magic')
program_ran, outfile = convert.k15('k15_example.dat', sample_naming_con="4-2",
input_dir_path=input_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.realpath("magic_measurements.txt"))
def test_k15_with_options(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'k15_magic')
program_ran, outfile = convert.k15('k15_example.dat', specnum=2,
sample_naming_con="3",
location="Here",
meas_file="my_magic_measurements.txt",
samp_file="my_er_samples.txt",
aniso_outfile="my_rmag_anisotropy.txt",
result_file="my_rmag_results.txt",
input_dir_path=input_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.realpath("my_magic_measurements.txt"))
def test_data_model3(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'k15_magic')
program_ran, outfile = convert.k15('k15_example.dat', specnum=2,
input_dir_path=input_dir)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath('./measurements.txt'), os.path.realpath(outfile))
class TestLdeoMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'ldeo_magic')
def tearDown(self):
#filelist = ['measurements.txt', 'specimens.txt',
# 'samples.txt', 'sites.txt']
#pmag.remove_files(filelist, self.input_dir)
filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
'locations.txt', 'custom_specimens.txt', 'measurements.txt',
'custom_measurements.txt']
pmag.remove_files(filelist, WD)
#pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_ldeo_with_no_files(self):
with self.assertRaises(TypeError):
convert.ldeo()
def test_ldeo_success(self):
options = {'input_dir_path': self.input_dir, 'magfile': 'ldeo_magic_example.dat'}
program_ran, outfile = convert.ldeo(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_ldeo_options(self):
options = {'input_dir_path': self.input_dir, 'magfile': 'ldeo_magic_example.dat'}
options['noave'] = 1
options['specnum'] = 2
options['samp_con'] = 2
options['meas_file'] = "custom_measurements.txt"
options['location'] = "new place"
options['labfield'], options['phi'], options['theta'] = 40, 0, 90
program_ran, outfile = convert.ldeo(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(options['meas_file']), os.path.realpath(outfile))
class TestLivdbMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'livdb_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
#filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
# 'locations.txt', 'custom_specimens.txt', 'measurements.txt']
#pmag.remove_files(filelist, '.')
#pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_livdb_success(self):
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"))
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("measurements.txt"))
def test_livdb_all_experiment_types(self):
for folder in ["TH_IZZI+", "MW_C+", "MW_IZZI+andC++", "MW_OT+", "MW_P"]:
res, meas_file = convert.livdb(os.path.join(self.input_dir, folder))
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("measurements.txt"))
def test_with_options(self):
# naming con 1
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"),
location_name="place", samp_name_con=1, meas_out="custom.txt")
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("custom.txt"))
df = cb.MagicDataFrame(os.path.join(WD, "specimens.txt"))
self.assertEqual("ATPIPV04-1A", df.df.loc["ATPIPV04-1A"]['sample'])
# naming con 2 without chars
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"),
location_name="place", samp_name_con=2, site_name_con=2,
meas_out="custom.txt")
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("custom.txt"))
df = cb.MagicDataFrame(os.path.join(WD, "specimens.txt"))
self.assertEqual("ATPIPV04-1A", df.df.loc['ATPIPV04-1A']['sample'])
df = cb.MagicDataFrame(os.path.join(WD, "samples.txt"))
self.assertEqual("ATPIPV04-1A", df.df.loc['ATPIPV04-1A']['site'])
def test_naming_con_2(self):
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"),
location_name="place", samp_name_con=2, samp_num_chars=1,
meas_out="custom.txt")
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("custom.txt"))
df = cb.MagicDataFrame(os.path.join(WD, "specimens.txt"))
self.assertEqual("ATPIPV04-1", df.df.loc["ATPIPV04-1A"]['sample'])
def test_naming_con_3(self):
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"),
location_name="place", samp_name_con=3, samp_num_chars="-",
meas_out="custom.txt")
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("custom.txt"))
df = cb.MagicDataFrame(os.path.join(WD, "specimens.txt"))
self.assertEqual(df.df.loc['ATPIPV04-1A']['sample'], 'ATPIPV04')
df = cb.MagicDataFrame(os.path.join(WD, "samples.txt"))
self.assertEqual(df.df.loc['ATPIPV04']['site'], "ATPIPV04")
class TestMstMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'mst_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
#filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
# 'locations.txt', 'custom_specimens.txt', 'measurements.txt']
pmag.remove_files(filelist, '.')
pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_mst_with_no_files(self):
with self.assertRaises(TypeError):
convert.mst()
def test_mst_success(self):
options = {'input_dir_path': self.input_dir, 'infile': 'curie_example.dat'}
options['spec_name'] = 'abcde'
options['location'] = 'place'
program_ran, outfile = convert.mst(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
self.assertEqual(meas_df.df.location.values[0], 'place')
con = cb.Contribution(WD)
for table in ['measurements', 'specimens', 'samples', 'sites', 'locations']:
self.assertIn(table, con.tables)
def test_mst_synthetic(self):
options = {'input_dir_path': self.input_dir, 'infile': 'curie_example.dat'}
options['spec_name'] = 'abcde'
options['syn'] = True
program_ran, outfile = convert.mst(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
class TestMiniMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'mini_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
def test_bad_file(self):
program_ran, error = convert.mini('fake_file')
self.assertFalse(program_ran)
self.assertEqual(error, "bad mag file name")
def test_success(self):
magfile = os.path.join(self.input_dir, "Peru_rev1.txt")
program_ran, outfile = convert.mini(magfile)
self.assertTrue(program_ran)
self.assertEqual(outfile, "measurements.txt")
def test_options(self):
magfile = os.path.join(self.input_dir, "Peru_rev1.txt")
program_ran, outfile = convert.mini(magfile, meas_file="custom.out",
user="me", noave=1, volume=15,
methcode="LP:FAKE")
self.assertTrue(program_ran)
self.assertEqual(outfile, "custom.out")
def test_dm_2(self):
magfile = os.path.join(self.input_dir, "Peru_rev1.txt")
program_ran, outfile = convert.mini(magfile, meas_file="custom.out",
user="me", noave=1, volume=15,
methcode="LP:FAKE", data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, "custom.out")
class TestPmdMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'pmd_magic', 'PMD', )
def tearDown(self):
filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
'locations.txt', 'custom_specimens.txt', 'measurements.txt',
'custom_meas.txt']
pmag.remove_files(filelist, WD)
pmag.remove_files(filelist, ".")
os.chdir(WD)
def test_pmd_with_no_files(self):
with self.assertRaises(TypeError):
convert.pmd()
def test_pmd_success(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'ss0207a.pmd'}
program_ran, outfile = convert.pmd(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_pmd_options(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'ss0207a.pmd'}
options['lat'], options['lon'] = 5, 10
options['specnum'] = 2
options['location'] = 'place'
options['meas_file'] = 'custom_meas.txt'
program_ran, outfile = convert.pmd(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'custom_meas.txt'))
loc_df = cb.MagicDataFrame(os.path.join(WD, 'locations.txt'))
self.assertEqual(loc_df.df.index.values[0], 'place')
class TestSioMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['sio_af_example.magic']
directory = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic')
pmag.remove_files(filelist, directory)
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_sio_magic_no_files(self):
with self.assertRaises(TypeError):
convert.sio()
def test_sio_magic_success(self):
options = {}
dir_path = os.path.join('data_files', 'convert_2_magic',
'sio_magic')
options['mag_file'] = os.path.join(dir_path, 'sio_af_example.dat')
options['meas_file'] = os.path.join(dir_path, 'sio_af_example.magic')
program_ran, file_name = convert.sio(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(file_name),
os.path.realpath(options['meas_file']))
meas_df = cb.MagicDataFrame(os.path.realpath(options['meas_file']))
self.assertIn('sequence', meas_df.df.columns)
self.assertEqual(0, meas_df.df.iloc[0]['sequence'])
def test_sio_magic_success_with_wd(self):
options = {}
dir_path = os.path.join('data_files', 'convert_2_magic',
'sio_magic')
options['mag_file'] = os.path.join('sio_af_example.dat')
options['meas_file'] = os.path.join('sio_af_example.magic')
options['dir_path'] = dir_path
program_ran, file_name = convert.sio(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(file_name),
os.path.realpath(os.path.join(dir_path, options['meas_file'])))
def test_sio_magic_fail_option4(self):
options = {}
options['mag_file'] = os.path.join(WD, 'data_files',
'convert_2_magic', 'sio_magic',
'sio_af_example.dat')
meas_file = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic', 'sio_af_example.magic')
options['meas_file'] = meas_file
options['samp_con'] = '4'
program_ran, error_message = convert.sio(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, "naming convention option [4] must be in form 4-Z where Z is an integer")
def test_sio_magic_succeed_option4(self):
options = {}
options['mag_file'] = os.path.join(WD, 'data_files',
'convert_2_magic', 'sio_magic',
'sio_af_example.dat')
meas_file = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic', 'sio_af_example.magic')
options['meas_file'] = meas_file
options['samp_con'] = '4-2'
program_ran, file_name = convert.sio(**options)
self.assertTrue(program_ran)
self.assertEqual(file_name, meas_file)
def test_sio_magic_fail_with_coil(self):
options = {}
options['mag_file'] = os.path.join(WD, 'data_files',
'convert_2_magic', 'sio_magic',
'sio_af_example.dat')
meas_file = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic', 'sio_af_example.magic')
options['meas_file'] = meas_file
options['coil'] = 4
program_ran, error_message = convert.sio(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, '4 is not a valid coil specification')
def test_sio_magic_succeed_with_coil(self):
options = {}
options['mag_file'] = os.path.join(WD, 'data_files',
'convert_2_magic', 'sio_magic',
'sio_af_example.dat')
meas_file = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic', 'sio_af_example.magic')
options['meas_file'] = meas_file
options['coil'] = '1'
program_ran, file_name = convert.sio(**options)
self.assertTrue(program_ran)
self.assertEqual(file_name, meas_file)
class TestSMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 's_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
pmag.remove_files(filelist, self.input_dir)
def test_with_invalid_file(self):
res, error_msg = convert.s_magic('fake.txt')
self.assertFalse(res)
expected_file = os.path.join(WD, "fake.txt")
self.assertEqual(error_msg, "No such file: {}".format(expected_file))
def test_success(self):
res, outfile = convert.s_magic("s_magic_example.dat", dir_path=self.input_dir)
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(self.input_dir, "specimens.txt"))
def test_with_options(self):
res, outfile = convert.s_magic("s_magic_example.dat", dir_path=self.input_dir,
specnum=1, location="place", spec="abcd-efg",
user="me", samp_con=2)
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(self.input_dir, "specimens.txt"))
self.assertTrue(os.path.exists(os.path.join(self.input_dir, "sites.txt")))
con = cb.Contribution(self.input_dir)
self.assertIn('sites', con.tables)
self.assertEqual('place', con.tables['sites'].df.loc[:, 'location'].values[0])
class TestSufarAscMagic(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
filelist = ['magic_measurements.txt', 'my_magic_measurements.txt',
'er_specimens.txt', 'er_samples.txt', 'my_er_samples.txt',
'er_sites.txt', 'rmag_anisotropy.txt', 'my_rmag_anisotropy.txt',
'rmag_results.txt', 'my_rmag_results.txt', 'measurements.txt',
'specimens.txt', 'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_sufar4_with_no_files(self):
with self.assertRaises(TypeError):
convert.sufar4()
def test_sufar4_with_invalid_file(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'sufar_asc_magic')
infile = 'fake_sufar4-asc_magic_example.txt'
program_ran, error_message = convert.sufar4(infile,
input_dir_path=input_dir,
data_model_num=2)
self.assertFalse(program_ran)
self.assertEqual(error_message,
'Error opening file: {}'.format(os.path.join(input_dir,
infile)))
def test_sufar4_with_infile(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
program_ran, outfile = convert.sufar4(infile,
input_dir_path=input_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join('.', 'magic_measurements.txt'))
with open(outfile, 'r') as ofile:
lines = ofile.readlines()
self.assertEqual(292, len(lines))
def test_sufar4_succeed_data_model3(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
program_ran, outfile = convert.sufar4(infile,
input_dir_path=input_dir)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join('.', 'measurements.txt'))
with open(outfile, 'r') as ofile:
lines = ofile.readlines()
self.assertEqual(292, len(lines))
self.assertEqual('measurements', lines[0].split('\t')[1].strip())
con = cb.Contribution(WD)
self.assertEqual(sorted(con.tables),
sorted(['measurements', 'specimens',
'samples', 'sites']))
def test_sufar4_fail_option4(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
program_ran, error_message = convert.sufar4(infile,
input_dir_path=input_dir,
sample_naming_con='4',
data_model_num=2)
self.assertFalse(program_ran)
self.assertEqual(error_message, "option [4] must be in form 4-Z where Z is an integer")
def test_sufar4_succeed_option4(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
ofile = 'my_magic_measurements.txt'
program_ran, outfile = convert.sufar4(infile,
meas_output=ofile,
input_dir_path=input_dir,
sample_naming_con='4-2',
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join('.', ofile))
def test_sufar4_with_options(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
program_ran, outfile = convert.sufar4(infile, meas_output='my_magic_measurements.txt',
aniso_output="my_rmag_anisotropy.txt",
specnum=2, locname="Here", instrument="INST",
static_15_position_mode=True, input_dir_path=input_dir,
sample_naming_con='5',
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join('.', 'my_magic_measurements.txt'))
class TestTdtMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'tdt_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
pmag.remove_files(filelist, '.')
pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_success(self):
res, outfile = convert.tdt(self.input_dir)
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(self.input_dir, "measurements.txt"))
def test_with_options(self):
res, outfile = convert.tdt(self.input_dir, meas_file_name="custom.out", location="here",
user="me", samp_name_con=2, samp_name_chars=1, site_name_con=2,
site_name_chars=1, volume=15., lab_inc=-90)
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(self.input_dir, "custom.out"))
df = cb.MagicDataFrame(os.path.join(self.input_dir, "samples.txt"))
self.assertEqual("MG", df.df["site"].values[0])
self.assertEqual("MGH", df.df["sample"].values[0])
class TestUtrechtMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'utrecht_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
#filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
# 'locations.txt', 'custom_specimens.txt', 'measurements.txt']
pmag.remove_files(filelist, '.')
pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_utrecht_with_no_files(self):
with self.assertRaises(TypeError):
convert.utrecht()
def test_utrecht_success(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'Utrecht_Example.af'}
program_ran, outfile = convert.utrecht(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
| bsd-3-clause |
nkmk/python-snippets | notebook/pandas_to_csv.py | 1 | 1404 | import pandas as pd
df = pd.read_csv('data/src/sample_pandas_normal.csv', index_col=0)
print(df)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
df.to_csv('data/dst/to_csv_out.csv')
df.to_csv('data/dst/to_csv_out_columns.csv', columns=['age'])
df.to_csv('data/dst/to_csv_out_header_index.csv', header=False, index=False)
df.to_csv('data/dst/to_csv_out.tsv', sep='\t')
df.to_csv('data/dst/to_csv_out_a.csv')
df.to_csv('data/dst/to_csv_out_a.csv', mode='a', header=False)
df.to_csv('data/dst/to_csv_out_a_new_column.csv')
df = pd.read_csv('data/dst/to_csv_out_a_new_column.csv', index_col=0)
print(df)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
df['new_col'] = 'new data'
print(df)
# age state point new_col
# name
# Alice 24 NY 64 new data
# Bob 42 CA 92 new data
# Charlie 18 CA 70 new data
# Dave 68 TX 70 new data
# Ellen 24 CA 88 new data
# Frank 30 NY 57 new data
df.to_csv('data/dst/to_csv_out_a_new_column.csv')
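# A hedged sketch (paths and values are illustrative): appending with mode='a'
# only lines up when the new rows share the columns already on disk, so one
# defensive pattern is to reindex against the existing header first.
new_rows = pd.DataFrame({'age': [35], 'state': ['WA'], 'point': [75]}, index=['Grace'])
existing_cols = pd.read_csv('data/dst/to_csv_out_a.csv', index_col=0, nrows=0).columns
new_rows.reindex(columns=existing_cols).to_csv('data/dst/to_csv_out_a.csv', mode='a', header=False)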
| mit |
Gaia3D/QGIS | python/plugins/processing/algs/qgis/RasterLayerHistogram.py | 1 | 3248 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import QVariant
from qgis.core import QgsField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterRaster
from processing.core.outputs import OutputTable
from processing.core.outputs import OutputHTML
from processing.tools import dataobjects
from processing.tools import raster
class RasterLayerHistogram(GeoAlgorithm):
INPUT = 'INPUT'
PLOT = 'PLOT'
TABLE = 'TABLE'
BINS = 'BINS'
def defineCharacteristics(self):
self.name = 'Raster layer histogram'
self.group = 'Graphics'
self.addParameter(ParameterRaster(self.INPUT,
self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BINS,
self.tr('Number of bins'), 2, None, 10))
self.addOutput(OutputHTML(self.PLOT, self.tr('Output plot')))
self.addOutput(OutputTable(self.TABLE, self.tr('Output table')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
nbins = self.getParameterValue(self.BINS)
outputplot = self.getOutputValue(self.PLOT)
outputtable = self.getOutputFromName(self.TABLE)
values = raster.scanraster(layer, progress)
# ALERT: this is potentially blocking if the layer is too big
plt.close()
valueslist = []
for v in values:
if v is not None:
valueslist.append(v)
(n, bins, values) = plt.hist(valueslist, nbins)
fields = [QgsField('CENTER_VALUE', QVariant.Double),
QgsField('NUM_ELEM', QVariant.Double)]
writer = outputtable.getTableWriter(fields)
for i in xrange(len(values)):
writer.addRecord([str(bins[i]) + '-' + str(bins[i + 1]), n[i]])
plotFilename = outputplot + '.png'
lab.savefig(plotFilename)
f = open(outputplot, 'w')
f.write('<html><img src="' + plotFilename + '"/></html>')
f.close()
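# A hedged sketch, separate from the algorithm above: the counts and bin edges
# that plt.hist produces can also be computed with numpy alone, without opening
# a figure. The names valueslist and nbins mirror those used in processAlgorithm.
def histogram_counts(valueslist, nbins):
    import numpy as np
    counts, bin_edges = np.histogram(np.asarray(valueslist), bins=nbins)
    return counts, bin_edges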
| gpl-2.0 |
OpenDataAnalytics/etl | recon_to_xyzv_parq.py | 1 | 1700 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Imports
from skimage import io
import pandas as pd
import fastparquet as fp
import numpy as np
import os
import tempfile
def readTiff(filename):
"""Read data from the tiff file and return a Pandas dataframe"""
filenamePrefix = os.path.splitext(os.path.basename(filename))[0]
im = io.imread(filename)
# Reshape 3D to one giant 1D
imgdata1d = im.reshape(im.shape[0] * im.shape[1] * im.shape[2])
dataSize = im.shape[0] * im.shape[1] * im.shape[2]
sliceSize = im.shape[2] * im.shape[1]
data = {
'x': [(i % im.shape[2]) for i in range(0, dataSize)],
'y': [(i / im.shape[2] % im.shape[1]) for i in range(0, dataSize)],
'z': [int(i / sliceSize) for i in range(0, dataSize)],
'value': imgdata1d.astype(np.int32),
}
# Convert to Pandas dataframe
df = pd.DataFrame(data)
return df
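# A hedged sketch mirroring readTiff's index arithmetic: numpy can build the
# x/y/z columns directly, with the same ordering as the reshape above
# (x varies fastest, then y, then z). This is usually much faster for large volumes.
def index_columns(im):
    z_idx, y_idx, x_idx = np.indices(im.shape)  # im.shape is (z, y, x)
    return x_idx.ravel(), y_idx.ravel(), z_idx.ravel()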
def writeParquet(inputFilename, df):
"""Export Pandas dataframe as Parquet"""
filenamePrefix = os.path.splitext(os.path.basename(inputFilename))[0]
outFilepath = os.path.join(tempfile.gettempdir(), ''.join([filenamePrefix, '.parq']))
fp.write(outFilepath, df, compression='GZIP')
print outFilepath
return outFilepath
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Script to convert data in tiff format to Parquet format')
parser.add_argument('--tiff', dest='filename', help='input tiff file')
args = parser.parse_args()
# Read TIFF file and convert it into a Pandas dataframe
df = readTiff(args.filename)
# Export dataframe as parquet
outFilepath = writeParquet(args.filename, df)
| apache-2.0 |
Alex2114/Deb-3df | redeem/BedCompensation.py | 2 | 7939 | """
Author: Elias Bakken
email: elias(dot)bakken(at)gmail(dot)com
Website: http://www.thing-printer.com
License: GNU GPL v3: http://www.gnu.org/copyleft/gpl.html
Redeem is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Redeem is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Redeem. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import copy
class BedCompensation:
@staticmethod
def create_rotation_matrix(probe_points, probe_heights):
probe_points = copy.deepcopy(probe_points)
""" http://math.stackexchange.com/a/476311 """
if len(probe_points) == 3:
P0 = np.array([probe_points[0]["X"]/1000.0, probe_points[0]["Y"]/1000.0, probe_heights[0]/1000.0])
P1 = np.array([probe_points[1]["X"]/1000.0, probe_points[1]["Y"]/1000.0, probe_heights[1]/1000.0])
P2 = np.array([probe_points[2]["X"]/1000.0, probe_points[2]["Y"]/1000.0, probe_heights[2]/1000.0])
else:
# Add Z (height) to the probe points
for k, v in enumerate(probe_points):
probe_points[k]["X"] /= 1000.0
probe_points[k]["Y"] /= 1000.0
probe_points[k]["Z"] = probe_heights[k]/1000.0
(P0, P1, P2) = BedCompensation.create_plane_from_points(probe_points)
# calculate the bed normal vector
P10 = BedCompensation.normalize(P0-P1)
P21 = BedCompensation.normalize(P2-P1)
bed_normal = BedCompensation.normalize(np.cross(P10, P21))
# calculate a normal vector in world space in the same direction as the bed normal
ideal_normal = np.array([0.0, 0.0, np.sign(bed_normal[2])])
# calculate the rotation matrix that will align the ideal normal
# with the bed normal
v = np.cross(bed_normal, ideal_normal)
c = np.dot(bed_normal, ideal_normal)
s = np.linalg.norm(v)
ssc = np.array([[0.0, -v[2], v[1]],
[v[2], 0.0, -v[0]],
[-v[1], v[0], 0.0]])
        R = np.eye(3) + ssc + ssc.dot(ssc)*(1.0 - c)/(s**2)  # matrix square of ssc (np.dot), as in the cited formula
# check if the rotation matrix is valid, if not then return identity matrix
if np.all(np.isfinite(R)):
return R
#TODO: This makes no sense, it should be R, not R/4
return R*0.25 + np.eye(3)*0.75
else:
return np.eye(3)
@staticmethod
def normalize(vec):
return vec/np.linalg.norm(vec)
@staticmethod
def create_plane_from_points(points):
""" This method uses linear regression (least squares) to fit a plane
to a set of data points. This is useful if the number of probe points is > 3.
The plane is then used to sample three new points. """
x = []
y = []
z = []
for p in points:
x.append(p["X"])
y.append(p["Y"])
z.append(p["Z"])
A = np.column_stack((np.ones(len(x)), x, y))
# Solve for a least squares estimate
(coeffs, residuals, rank, sing_vals) = np.linalg.lstsq(A, z)
X = np.linspace(min(x), max(x), 3)
Y = np.linspace(min(y), max(y), 3)
X, Y = np.meshgrid(X, Y)
Z = coeffs[0]+coeffs[1]*X + coeffs[2]*Y
# Resample the probe points based on the least squares plane found.
P0 = np.array([min(x), min(y), coeffs[0]+coeffs[1]*min(x)+coeffs[2]*min(y)])
P1 = np.array([min(x), max(y), coeffs[0]+coeffs[1]*min(x)+coeffs[2]*max(y)])
P2 = np.array([(max(x)-min(x))/2.0, max(y), coeffs[0]+coeffs[1]*(max(x)-min(x))/2.0+coeffs[2]*max(y)])
return (P0, P1, P2)
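# A hedged usage sketch (probe values are illustrative only): three probe points
# in mm plus their measured heights give a rotation matrix that can be applied
# to a nominal position, as the __main__ block below does with Rn.
def _example_compensation():
    probe_points = [{"X": 0.0, "Y": 50.0}, {"X": 43.3, "Y": -25.0}, {"X": -43.3, "Y": -25.0}]
    probe_heights = [-1.0, -1.2, -1.4]
    R = BedCompensation.create_rotation_matrix(probe_points, probe_heights)
    return np.array([10.0, 10.0, -1.2]).dot(R)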
if __name__ == "__main__":
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import json
points = [{"Y": 0.0, "X": 65.0, "Z": -1.6499999999999995}, {"Y": 32.5, "X": 56.29, "Z": -1.0625}, {"Y": 56.29, "X": 32.5, "Z": -0.56249999999999967}, {"Y": 65.0, "X": 0.0, "Z": -0.40000000000000019}, {"Y": 56.29, "X": -32.5, "Z": -0.67500000000000027}, {"Y": 32.5, "X": -56.29, "Z": -1.1875000000000002}, {"Y": 0.0, "X": -65.0, "Z": -1.7499999999999998}, {"Y": -32.5, "X": -56.29, "Z": -2.1624999999999996}, {"Y": -56.29, "X": -32.5, "Z": -2.4250000000000003}, {"Y": -65.0, "X": -0.0, "Z": -2.4375000000000004}, {"Y": -56.29, "X": 32.5, "Z": -2.3374999999999995}, {"Y": -32.5, "X": 56.29, "Z": -2.0999999999999996}, {"Y": 0.0, "X": 32.5, "Z": -1.5624999999999996}, {"Y": 16.25, "X": 28.15, "Z": -1.2624999999999997}, {"Y": 28.15, "X": 16.25, "Z": -1.0375000000000003}, {"Y": 32.5, "X": 0.0, "Z": -0.9750000000000002}, {"Y": 28.15, "X": -16.25, "Z": -1.0874999999999999}, {"Y": 16.25, "X": -28.15, "Z": -1.3499999999999996}, {"Y": 0.0, "X": -32.5, "Z": -1.6624999999999999}, {"Y": -16.25, "X": -28.15, "Z": -1.9249999999999996}, {"Y": -28.15, "X": -16.25, "Z": -2.0625}, {"Y": -32.5, "X": -0.0, "Z": -2.087499999999999}, {"Y": -28.15, "X": 16.25, "Z": -2.0}, {"Y": -16.25, "X": 28.15, "Z": -1.8250000000000002}, {"Y": 0.0, "X": 0.0, "Z": -1.575}]
fixed = [{"Y": 0.0, "X": 65.0, "Z": -1.7000000000000002}, {"Y": 32.5, "X": 56.29, "Z": -1.6249999999999998}, {"Y": 56.29, "X": 32.5, "Z": -1.4749999999999996}, {"Y": 65.0, "X": 0.0, "Z": -1.425}, {"Y": 56.29, "X": -32.5, "Z": -1.5374999999999999}, {"Y": 32.5, "X": -56.29, "Z": -1.6375000000000002}, {"Y": 0.0, "X": -65.0, "Z": -1.6874999999999998}, {"Y": -32.5, "X": -56.29, "Z": -1.5624999999999996}, {"Y": -56.29, "X": -32.5, "Z": -1.4999999999999996}, {"Y": -65.0, "X": -0.0, "Z": -1.3749999999999996}, {"Y": -56.29, "X": 32.5, "Z": -1.45}, {"Y": -32.5, "X": 56.29, "Z": -1.6249999999999998}, {"Y": 0.0, "X": 32.5, "Z": -1.575}, {"Y": 16.25, "X": 28.15, "Z": -1.5249999999999995}, {"Y": 28.15, "X": 16.25, "Z": -1.4749999999999996}, {"Y": 32.5, "X": 0.0, "Z": -1.45}, {"Y": 28.15, "X": -16.25, "Z": -1.4749999999999996}, {"Y": 16.25, "X": -28.15, "Z": -1.5374999999999999}, {"Y": 0.0, "X": -32.5, "Z": -1.5874999999999995}, {"Y": -16.25, "X": -28.15, "Z": -1.5999999999999999}, {"Y": -28.15, "X": -16.25, "Z": -1.575}, {"Y": -32.5, "X": -0.0, "Z": -1.5500000000000003}, {"Y": -28.15, "X": 16.25, "Z": -1.5374999999999999}, {"Y": -16.25, "X": 28.15, "Z": -1.5624999999999996}, {"Y": 0.0, "X": 0.0, "Z": -1.5500000000000003}]
add = points[-1]["Z"]
x1, y1, z1 = map(list, zip(*map(lambda d: tuple(np.array([d['X'], d['Y'], d['Z']])), points)))
x3, y3, z3 = map(list, zip(*map(lambda d: tuple(np.array([d['X'], d['Y'], d['Z']])), fixed)))
z = map(lambda d: d['Z'], points)
Rn = BedCompensation.create_rotation_matrix(points, z)
x2, y2, z2 = map(list, zip(*map(lambda d: tuple(np.array([d['X'], d['Y'], add ]).dot(Rn)), points )))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(x1, y1, z1, linestyle="none", marker="o", mfc="none", markeredgecolor="red")
ax.plot(x2, y2, z2, linestyle="none", marker=".", mfc="none", markeredgecolor="green")
ax.plot(x3, y3, z3, linestyle="none", marker="o", mfc="none", markeredgecolor="blue")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
print "Level bed max diff before: "+str(max(z1)-min(z1))+" after: "+str(max(z3)-min(z3))
print "var matrix = "+json.dumps(Rn.tolist())+";"
probe = {"x": x1, "y": y1, "z": z1}
print "var probe = "+json.dumps(probe)+";"
fixed = {"x": x3, "y": y3, "z": z3}
print "var fixed = "+json.dumps(fixed)+";"
plt.show()
| gpl-3.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/ipython-2.2.0-py2.7.egg/IPython/core/usage.py | 7 | 23187 | # -*- coding: utf-8 -*-
"""Usage information for the main IPython applications.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
# Copyright (C) 2001-2007 Fernando Perez. <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
from IPython.core import release
cl_usage = """\
=========
IPython
=========
Tools for Interactive Computing in Python
=========================================
A Python shell with automatic history (input and output), dynamic object
introspection, easier configuration, command completion, access to the
system shell and more. IPython can also be embedded in running programs.
Usage
ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ...
If invoked with no options, it executes the file and exits, passing the
remaining arguments to the script, just as if you had specified the same
command with python. You may need to specify `--` before args to be passed
to the script, to prevent IPython from attempting to parse them. If you
specify the option `-i` before the filename, it will enter an interactive
IPython session after running the script, rather than exiting. Files ending
in .py will be treated as normal Python, but files ending in .ipy can
contain special IPython syntax (magic commands, shell expansions, etc.).
Almost all configuration in IPython is available via the command-line. Do
`ipython --help-all` to see all available options. For persistent
configuration, look into your `ipython_config.py` configuration file for
details.
This file is typically installed in the `IPYTHONDIR` directory, and there
is a separate configuration directory for each profile. The default profile
directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR
defaults to `$HOME/.ipython`. For Windows users, $HOME resolves to
C:\\Documents and Settings\\YourUserName in most instances.
To initialize a profile with the default configuration file, do::
$> ipython profile create
and start editing `IPYTHONDIR/profile_default/ipython_config.py`
In IPython's documentation, we will refer to this directory as
`IPYTHONDIR`, you can change its default location by creating an
environment variable with this name and setting it to the desired path.
For more information, see the manual available in HTML and PDF in your
installation, or online at http://ipython.org/documentation.html.
"""
interactive_usage = """
IPython -- An enhanced Interactive Python
=========================================
IPython offers a combination of convenient shell features, special commands
and a history mechanism for both input (command history) and output (results
caching, similar to Mathematica). It is intended to be a fully compatible
replacement for the standard Python interpreter, while offering vastly
improved functionality and flexibility.
At your system command line, type 'ipython -h' to see the command line
options available. This document only describes interactive features.
MAIN FEATURES
-------------
* Access to the standard Python help. As of Python 2.1, a help system is
available with access to object docstrings and the Python manuals. Simply
type 'help' (no quotes) to access it.
* Magic commands: type %magic for information on the magic subsystem.
* System command aliases, via the %alias command or the configuration file(s).
* Dynamic object information:
Typing ?word or word? prints detailed information about an object. If
certain strings in the object are too long (docstrings, code, etc.) they get
snipped in the center for brevity.
Typing ??word or word?? gives access to the full information without
snipping long strings. Long strings are sent to the screen through the less
pager if longer than the screen, printed otherwise.
The ?/?? system gives access to the full source code for any object (if
available), shows function prototypes and other useful information.
If you just want to see an object's docstring, type '%pdoc object' (without
quotes, and without % if you have automagic on).
Both %pdoc and ?/?? give you access to documentation even on things which are
  not explicitly defined. Try for example typing {}.get? or after import os,
type os.path.abspath??. The magic functions %pdef, %source and %file operate
similarly.
* Completion in the local namespace, by typing TAB at the prompt.
At any time, hitting tab will complete any available python commands or
variable names, and show you a list of the possible completions if there's
no unambiguous one. It will also complete filenames in the current directory.
This feature requires the readline and rlcomplete modules, so it won't work
if your Python lacks readline support (such as under Windows).
* Search previous command history in two ways (also requires readline):
- Start typing, and then use Ctrl-p (previous,up) and Ctrl-n (next,down) to
search through only the history items that match what you've typed so
far. If you use Ctrl-p/Ctrl-n at a blank prompt, they just behave like
normal arrow keys.
- Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
your history for lines that match what you've typed so far, completing as
much as it can.
- %hist: search history by index (this does *not* require readline).
* Persistent command history across sessions.
* Logging of input with the ability to save and restore a working session.
* System escape with !. Typing !ls will run 'ls' in the current directory.
* The reload command does a 'deep' reload of a module: changes made to the
module since you imported will actually be available without having to exit.
* Verbose and colored exception traceback printouts. See the magic xmode and
xcolor functions for details (just type %magic).
* Input caching system:
IPython offers numbered prompts (In/Out) with input and output caching. All
input is saved and can be retrieved as variables (besides the usual arrow
key recall).
The following GLOBAL variables always exist (so don't overwrite them!):
_i: stores previous input.
_ii: next previous.
_iii: next-next previous.
_ih : a list of all input _ih[n] is the input from line n.
Additionally, global variables named _i<n> are dynamically created (<n>
being the prompt counter), such that _i<n> == _ih[<n>]
For example, what you typed at prompt 14 is available as _i14 and _ih[14].
You can create macros which contain multiple input lines from this history,
for later re-execution, with the %macro function.
The history function %hist allows you to see any part of your input history
by printing a range of the _i variables. Note that inputs which contain
magic functions (%) appear in the history with a prepended comment. This is
because they aren't really valid Python code, so you can't exec them.
* Output caching system:
For output that is returned from actions, a system similar to the input
cache exists but using _ instead of _i. Only actions that produce a result
(NOT assignments, for example) are cached. If you are familiar with
Mathematica, IPython's _ variables behave exactly like Mathematica's %
variables.
The following GLOBAL variables always exist (so don't overwrite them!):
_ (one underscore): previous output.
__ (two underscores): next previous.
___ (three underscores): next-next previous.
Global variables named _<n> are dynamically created (<n> being the prompt
counter), such that the result of output <n> is always available as _<n>.
Finally, a global dictionary named _oh exists with entries for all lines
which generated output.
* Directory history:
Your history of visited directories is kept in the global list _dh, and the
magic %cd command can be used to go to any entry in that list.
* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
1. Auto-parentheses
Callable objects (i.e. functions, methods, etc) can be invoked like
this (notice the commas between the arguments)::
In [1]: callable_ob arg1, arg2, arg3
and the input will be translated to this::
callable_ob(arg1, arg2, arg3)
This feature is off by default (in rare cases it can produce
undesirable side-effects), but you can activate it at the command-line
by starting IPython with `--autocall 1`, set it permanently in your
configuration file, or turn on at runtime with `%autocall 1`.
You can force auto-parentheses by using '/' as the first character
of a line. For example::
In [1]: /globals # becomes 'globals()'
Note that the '/' MUST be the first character on the line! This
won't work::
In [2]: print /globals # syntax error
In most cases the automatic algorithm should work, so you should
rarely need to explicitly invoke /. One notable exception is if you
are trying to call a function with a list of tuples as arguments (the
     parentheses will confuse IPython)::
In [1]: zip (1,2,3),(4,5,6) # won't work
but this will work::
In [2]: /zip (1,2,3),(4,5,6)
------> zip ((1,2,3),(4,5,6))
Out[2]= [(1, 4), (2, 5), (3, 6)]
IPython tells you that it has altered your command line by
displaying the new command line preceded by -->. e.g.::
In [18]: callable list
-------> callable (list)
2. Auto-Quoting
You can force auto-quoting of a function's arguments by using ',' as
the first character of a line. For example::
In [1]: ,my_function /home/me # becomes my_function("/home/me")
If you use ';' instead, the whole argument is quoted as a single
string (while ',' splits on whitespace)::
In [2]: ,my_function a b c # becomes my_function("a","b","c")
In [3]: ;my_function a b c # becomes my_function("a b c")
Note that the ',' MUST be the first character on the line! This
won't work::
In [4]: x = ,my_function /home/me # syntax error
"""
interactive_usage_min = """\
An enhanced console for Python.
Some of its features are:
- Readline support if the readline library is present.
- Tab completion in the local namespace.
- Logging of input, see command-line options.
- System shell escape via ! , eg !ls.
- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.)
- Keeps track of locally defined variables via %who, %whos.
- Show object information with a ? eg ?x or x? (use ?? for more info).
"""
quick_reference = r"""
IPython -- An enhanced Interactive Python - Quick Reference Card
================================================================
obj?, obj?? : Get help, or more help for object (also works as
?obj, ??obj).
?foo.*abc* : List names in 'foo' containing 'abc' in them.
%magic : Information about IPython's 'magic' % functions.
Magic functions are prefixed by % or %%, and typically take their arguments
without parentheses, quotes or even commas for convenience. Line magics take a
single % and cell magics are prefixed with two %%.
Example magic function calls:
%alias d ls -F : 'd' is now an alias for 'ls -F'
alias d ls -F : Works if 'alias' not a python name
alist = %alias : Get list of aliases to 'alist'
cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
%cd?? : See help AND source for magic %cd
%timeit x=10 : time the 'x=10' statement with high precision.
%%timeit x=2**100
x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not
counted. This is an example of a cell magic.
System commands:
!cp a.txt b/ : System command escape, calls os.system()
cp a.txt b/ : after %rehashx, most system commands work without !
cp ${f}.txt $bar : Variable expansion in magics and system commands
files = !ls /usr : Capture system command output
files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
History:
_i, _ii, _iii : Previous, next previous, next next previous input
_i4, _ih[2:5] : Input history line 4, lines 2-4
exec _i81 : Execute input history line #81 again
%rep 81 : Edit input history line #81
_, __, ___ : previous, next previous, next next previous output
_dh : Directory history
_oh : Output history
%hist : Command history. '%hist -g foo' search history for 'foo'
Autocall:
f 1,2 : f(1,2) # Off by default, enable with %autocall magic.
/f 1,2 : f(1,2) (forced autoparen)
,f 1 2 : f("1","2")
;f 1 2 : f("1 2")
Remember: TAB completion works in many contexts, not just file names
or python names.
The following magic functions are currently available:
"""
gui_reference = """\
===============================
The graphical IPython console
===============================
This console is designed to emulate the look, feel and workflow of a terminal
environment, while adding a number of enhancements that are simply not possible
in a real terminal, such as inline syntax highlighting, true multiline editing,
inline graphics and much more.
This quick reference document contains the basic information you'll need to
know to make the most efficient use of it. For the various command line
options available at startup, type ``ipython qtconsole --help`` at the command line.
Multiline editing
=================
The graphical console is capable of true multiline editing, but it also tries
to behave intuitively like a terminal when possible. If you are used to
IPython's old terminal behavior, you should find the transition painless, and
once you learn a few basic keybindings it will be a much more efficient
environment.
For single expressions or indented blocks, the console behaves almost like the
terminal IPython: single expressions are immediately evaluated, and indented
blocks are evaluated once a single blank line is entered::
In [1]: print "Hello IPython!" # Enter was pressed at the end of the line
Hello IPython!
In [2]: for i in range(10):
...: print i,
...:
0 1 2 3 4 5 6 7 8 9
If you want to enter more than one expression in a single input block
(something not possible in the terminal), you can use ``Control-Enter`` at the
end of your first line instead of ``Enter``. At that point the console goes
into 'cell mode' and even if your inputs are not indented, it will continue
accepting arbitrarily many lines until either you enter an extra blank line or
you hit ``Shift-Enter`` (the key binding that forces execution). When a
multiline cell is entered, IPython analyzes it and executes its code producing
an ``Out[n]`` prompt only for the last expression in it, while the rest of the
cell is executed as if it was a script. An example should clarify this::
In [3]: x=1 # Hit C-Enter here
...: y=2 # from now on, regular Enter is sufficient
...: z=3
...: x**2 # This does *not* produce an Out[] value
...: x+y+z # Only the last expression does
...:
Out[3]: 6
The behavior where an extra blank line forces execution is only active if you
are actually typing at the keyboard each line, and is meant to make it mimic
the IPython terminal behavior. If you paste a long chunk of input (for example
a long script copied from an editor or web browser), it can contain arbitrarily
many intermediate blank lines and they won't cause any problems. As always,
you can then make it execute by appending a blank line *at the end* or hitting
``Shift-Enter`` anywhere within the cell.
With the up arrow key, you can retrieve previous blocks of input that contain
multiple lines. You can move inside of a multiline cell like you would in any
text editor. When you want it executed, the simplest thing to do is to hit the
force execution key, ``Shift-Enter`` (though you can also navigate to the end
and append a blank line by using ``Enter`` twice).
If you've edited a multiline cell and accidentally navigate out of it with the
up or down arrow keys, IPython will clear the cell and replace it with the
contents of the one above or below that you navigated to. If this was an
accident and you want to retrieve the cell you were editing, use the Undo
keybinding, ``Control-z``.
Key bindings
============
The IPython console supports most of the basic Emacs line-oriented keybindings,
in addition to some of its own.
The keybinding prefixes mean:
- ``C``: Control
- ``S``: Shift
- ``M``: Meta (typically the Alt key)
The keybindings themselves are:
- ``Enter``: insert new line (may cause execution, see above).
- ``C-Enter``: *force* new line, *never* causes execution.
- ``S-Enter``: *force* execution regardless of where cursor is, no newline added.
- ``Up``: step backwards through the history.
- ``Down``: step forwards through the history.
- ``S-Up``: search backwards through the history (like ``C-r`` in bash).
- ``S-Down``: search forwards through the history.
- ``C-c``: copy highlighted text to clipboard (prompts are automatically stripped).
- ``C-S-c``: copy highlighted text to clipboard (prompts are not stripped).
- ``C-v``: paste text from clipboard.
- ``C-z``: undo (retrieves lost text if you move out of a cell with the arrows).
- ``C-S-z``: redo.
- ``C-o``: move to 'other' area, between pager and terminal.
- ``C-l``: clear terminal.
- ``C-a``: go to beginning of line.
- ``C-e``: go to end of line.
- ``C-u``: kill from cursor to the beginning of the line.
- ``C-k``: kill from cursor to the end of the line.
- ``C-y``: yank (paste)
- ``C-p``: previous line (like up arrow)
- ``C-n``: next line (like down arrow)
- ``C-f``: forward (like right arrow)
- ``C-b``: back (like left arrow)
- ``C-d``: delete next character, or exits if input is empty
- ``M-<``: move to the beginning of the input region.
- ``M->``: move to the end of the input region.
- ``M-d``: delete next word.
- ``M-Backspace``: delete previous word.
- ``C-.``: force a kernel restart (a confirmation dialog appears).
- ``C-+``: increase font size.
- ``C--``: decrease font size.
- ``C-M-Space``: toggle full screen. (Command-Control-Space on Mac OS X)
The IPython pager
=================
IPython will show long blocks of text from many sources using a builtin pager.
You can control where this pager appears with the ``--paging`` command-line
flag:
- ``inside`` [default]: the pager is overlaid on top of the main terminal. You
must quit the pager to get back to the terminal (similar to how a pager such
as ``less`` or ``more`` works).
- ``vsplit``: the console is made double-tall, and the pager appears on the
bottom area when needed. You can view its contents while using the terminal.
- ``hsplit``: the console is made double-wide, and the pager appears on the
right area when needed. You can view its contents while using the terminal.
- ``none``: the console never pages output.
If you use the vertical or horizontal paging modes, you can navigate between
terminal and pager as follows:
- Tab key: goes from pager to terminal (but not the other way around).
- Control-o: goes from one to another always.
- Mouse: click on either.
In all cases, the ``q`` or ``Escape`` keys quit the pager (when used with the
focus on the pager area).
Running subprocesses
====================
The graphical IPython console uses the ``pexpect`` module to run subprocesses
when you type ``!command``. This has a number of advantages (true asynchronous
output from subprocesses as well as very robust termination of rogue
subprocesses with ``Control-C``), as well as some limitations. The main
limitation is that you can *not* interact back with the subprocess, so anything
that invokes a pager or expects you to type input into it will block and hang
(you can kill it with ``Control-C``).
We have provided as magics ``%less`` to page files (aliased to ``%more``),
``%clear`` to clear the terminal, and ``%man`` on Linux/OSX. These cover the
most common commands you'd want to call in your subshell and that would cause
problems if invoked via ``!cmd``, but you need to be aware of this limitation.
Display
=======
The IPython console can now display objects in a variety of formats, including
HTML, PNG and SVG. This is accomplished using the display functions in
``IPython.core.display``::
In [4]: from IPython.core.display import display, display_html
In [5]: from IPython.core.display import display_png, display_svg
Python objects can simply be passed to these functions and the appropriate
representations will be displayed in the console as long as the objects know
how to compute those representations. The easiest way of teaching objects how
to format themselves in various representations is to define special methods
such as: ``_repr_html_``, ``_repr_svg_`` and ``_repr_png_``. IPython's display formatters
can also be given custom formatter functions for various types::
In [6]: ip = get_ipython()
In [7]: html_formatter = ip.display_formatter.formatters['text/html']
In [8]: html_formatter.for_type(Foo, foo_to_html)
For further details, see ``IPython.core.formatters``.
Inline matplotlib graphics
==========================
The IPython console is capable of displaying matplotlib figures inline, in SVG
or PNG format. If started with ``matplotlib=inline``, then all figures are
rendered inline automatically (PNG by default). If started with ``--matplotlib``
or ``matplotlib=<your backend>``, then a GUI backend will be used, but IPython's
``display()`` and ``getfigs()`` functions can be used to view plots inline::
In [9]: display(*getfigs()) # display all figures inline
In[10]: display(*getfigs(1,2)) # display figures 1 and 2 inline
"""
quick_guide = """\
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
"""
gui_note = """\
%guiref -> A brief reference about the graphical user interface.
"""
default_banner_parts = [
'Python %s\n' % (sys.version.split('\n')[0],),
'Type "copyright", "credits" or "license" for more information.\n\n',
'IPython {version} -- An enhanced Interactive Python.\n'.format(
version=release.version,
),
quick_guide
]
default_gui_banner_parts = default_banner_parts + [gui_note]
default_banner = ''.join(default_banner_parts)
default_gui_banner = ''.join(default_gui_banner_parts)
# page GUI Reference, for use as a magic:
def page_guiref(arg_s=None):
"""Show a basic reference about the GUI Console."""
from IPython.core import page
page.page(gui_reference, auto_html=True)
| apache-2.0 |
ltiao/scikit-learn | sklearn/manifold/setup.py | 99 | 1243 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
glorizen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/ticker.py | 69 | 37420 | """
Tick locating and formatting
============================
This module contains classes to support completely configurable tick
locating and formatting. Although the locators know nothing about
major or minor ticks, they are used by the Axis class to support major
and minor tick locating and formatting. Generic tick locators and
formatters are provided, as well as domain specific custom ones..
Tick locating
-------------
The Locator class is the base class for all tick locators. The
locators handle autoscaling of the view limits based on the data
limits, and the choosing of tick locations. A useful semi-automatic
tick locator is MultipleLocator. You initialize this with a base, eg
10, and it picks axis limits and ticks that are multiples of your
base.
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (eg. where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
logarithmically ticks from min to max
:class:`MultipleLocator`
ticks and range are a multiple of base;
either integer or float
:class:`OldAutoLocator`
    choose a MultipleLocator and dynamically reassign it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, eg no minor ticks on by
default.
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
no labels on the ticks
:class:`FixedFormatter`
set the strings manually for the labels
:class:`FuncFormatter`
user defined function sets the labels
:class:`FormatStrFormatter`
use a sprintf format string
:class:`ScalarFormatter`
default formatter for scalars; autopick the fmt string
:class:`LogFormatter`
formatter for log axes
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has access
to the axis view and data limits.
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
class TickHelper:
axis = None
class DummyAxis:
def __init__(self):
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
def get_view_interval(self):
return self.viewLim.intervalx
def set_view_interval(self, vmin, vmax):
self.viewLim.intervalx = vmin, vmax
def get_data_interval(self):
return self.dataLim.intervalx
def set_data_interval(self, vmin, vmax):
self.dataLim.intervalx = vmin, vmax
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self):
if self.axis is None:
self.axis = self.DummyAxis()
def set_view_interval(self, vmin, vmax):
self.axis.set_view_interval(vmin, vmax)
def set_data_interval(self, vmin, vmax):
self.axis.set_data_interval(vmin, vmax)
def set_bounds(self, vmin, vmax):
self.set_view_interval(vmin, vmax)
self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
"""
Convert the tick location to a string
"""
# some classes want to see all the locs to help format
# individual ones
locs = []
def __call__(self, x, pos=None):
        'Return the format for tick val x at position pos; pos=None indicates unspecified'
        raise NotImplementedError('Derived must override')
def format_data(self,value):
return self.__call__(value)
def format_data_short(self,value):
'return a short string version'
return self.format_data(value)
def get_offset(self):
return ''
def set_locs(self, locs):
self.locs = locs
def fix_minus(self, s):
"""
some classes may want to replace a hyphen for minus with the
proper unicode symbol as described `here
<http://sourceforge.net/tracker/index.php?func=detail&aid=1962574&group_id=80706&atid=560720>`_.
The default is to do nothing
Note, if you use this method, eg in :meth`format_data` or
call, you probably don't want to use it for
:meth:`format_data_short` since the toolbar uses this for
interative coord reporting and I doubt we can expect GUIs
across platforms will handle the unicode correctly. So for
now the classes that override :meth:`fix_minus` should have an
explicit :meth:`format_data_short` method
"""
return s
class NullFormatter(Formatter):
'Always return the empty string'
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return ''
class FixedFormatter(Formatter):
'Return fixed strings for tick labels'
def __init__(self, seq):
"""
seq is a sequence of strings. For positions `i<len(seq)` return
*seq[i]* regardless of *x*. Otherwise return ''
"""
self.seq = seq
self.offset_string = ''
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if pos is None or pos>=len(self.seq): return ''
else: return self.seq[pos]
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
class FuncFormatter(Formatter):
"""
User defined function for formatting
"""
def __init__(self, func):
self.func = func
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.func(x, pos)
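# A hedged usage sketch of the pattern the module docstring describes (the axis
# object `ax` is assumed to be supplied by the caller): wrap a plain function in
# FuncFormatter and attach it with set_major_formatter.
def _example_percent_labels(ax):
    percent = FuncFormatter(lambda x, pos: '%1.0f%%' % (100.0 * x))
    ax.yaxis.set_major_formatter(percent)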
class FormatStrFormatter(Formatter):
"""
Use a format string to format the tick
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt % x
class OldScalarFormatter(Formatter):
"""
Tick location is a plain old number.
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
xmin, xmax = self.axis.get_view_interval()
d = abs(xmax - xmin)
return self.pprint_val(x,d)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x)<1e4 and x==int(x): return '%d' % x
if d < 1e-2: fmt = '%1.3e'
elif d < 1e-1: fmt = '%1.3f'
elif d > 1e5: fmt = '%1.1e'
elif d > 10 : fmt = '%1.1f'
elif d > 1 : fmt = '%1.2f'
else: fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup)==2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' %(mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class ScalarFormatter(Formatter):
"""
Tick location is a plain old number. If useOffset==True and the data range
is much smaller than the data average, then an offset will be determined
such that the tick labels are meaningful. Scientific notation is used for
data < 1e-3 or data >= 1e4.
"""
def __init__(self, useOffset=True, useMathText=False):
# useOffset allows plotting small data ranges with large offsets:
# for example: [1+1e-9,1+2e-9,1+3e-9]
# useMathText will render the offset and scientific notation in mathtext
self._useOffset = useOffset
self._usetex = rcParams['text.usetex']
self._useMathText = useMathText
self.offset = 0
self.orderOfMagnitude = 0
self.format = ''
self._scientific = True
self._powerlimits = rcParams['axes.formatter.limits']
def fix_minus(self, s):
'use a unicode minus rather than hyphen'
if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']: return s
else: return s.replace('-', u'\u2212')
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if len(self.locs)==0:
return ''
else:
s = self.pprint_val(x)
return self.fix_minus(s)
def set_scientific(self, b):
'''True or False to turn scientific notation on or off
see also :meth:`set_powerlimits`
'''
self._scientific = bool(b)
def set_powerlimits(self, lims):
'''
Sets size thresholds for scientific notation.
e.g. ``xaxis.set_powerlimits((-3, 4))`` sets the pre-2007 default in
which scientific notation is used for numbers less than
1e-3 or greater than 1e4.
See also :meth:`set_scientific`.
'''
assert len(lims) == 2, "argument must be a sequence of length 2"
self._powerlimits = lims
def format_data_short(self,value):
'return a short formatted string representation of a number'
return '%1.3g'%value
def format_data(self,value):
'return a formatted string representation of a number'
s = self._formatSciNotation('%1.10e'% value)
return self.fix_minus(s)
def get_offset(self):
"""Return scientific notation, plus offset"""
if len(self.locs)==0: return ''
s = ''
if self.orderOfMagnitude or self.offset:
offsetStr = ''
sciNotStr = ''
if self.offset:
offsetStr = self.format_data(self.offset)
if self.offset > 0: offsetStr = '+' + offsetStr
if self.orderOfMagnitude:
if self._usetex or self._useMathText:
sciNotStr = self.format_data(10**self.orderOfMagnitude)
else:
sciNotStr = '1e%d'% self.orderOfMagnitude
if self._useMathText:
if sciNotStr != '':
sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
s = ''.join(('$',sciNotStr,r'\mathdefault{',offsetStr,'}$'))
elif self._usetex:
if sciNotStr != '':
sciNotStr = r'\times%s' % sciNotStr
s = ''.join(('$',sciNotStr,offsetStr,'$'))
else:
s = ''.join((sciNotStr,offsetStr))
return self.fix_minus(s)
def set_locs(self, locs):
'set the locations of the ticks'
self.locs = locs
if len(self.locs) > 0:
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax-vmin)
if self._useOffset: self._set_offset(d)
self._set_orderOfMagnitude(d)
self._set_format()
def _set_offset(self, range):
# offset of 20,001 is 20,000, for example
locs = self.locs
if locs is None or not len(locs) or range == 0:
self.offset = 0
return
ave_loc = np.mean(locs)
        if ave_loc: # don't want to take log10(0)
ave_oom = math.floor(math.log10(np.mean(np.absolute(locs))))
range_oom = math.floor(math.log10(range))
if np.absolute(ave_oom-range_oom) >= 3: # four sig-figs
if ave_loc < 0:
self.offset = math.ceil(np.max(locs)/10**range_oom)*10**range_oom
else:
self.offset = math.floor(np.min(locs)/10**(range_oom))*10**(range_oom)
else: self.offset = 0
def _set_orderOfMagnitude(self,range):
# if scientific notation is to be used, find the appropriate exponent
        # if using a numerical offset, find the exponent after applying the offset
if not self._scientific:
self.orderOfMagnitude = 0
return
locs = np.absolute(self.locs)
if self.offset: oom = math.floor(math.log10(range))
else:
if locs[0] > locs[-1]: val = locs[0]
else: val = locs[-1]
if val == 0: oom = 0
else: oom = math.floor(math.log10(val))
if oom <= self._powerlimits[0]:
self.orderOfMagnitude = oom
elif oom >= self._powerlimits[1]:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def _set_format(self):
# set the format string to format all the ticklabels
# The floating point black magic (adding 1e-15 and formatting
# to 8 digits) may warrant review and cleanup.
locs = (np.asarray(self.locs)-self.offset) / 10**self.orderOfMagnitude+1e-15
sigfigs = [len(str('%1.8f'% loc).split('.')[1].rstrip('0')) \
for loc in locs]
sigfigs.sort()
self.format = '%1.' + str(sigfigs[-1]) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
self.format = '$\mathdefault{%s}$' % self.format
def pprint_val(self, x):
xp = (x-self.offset)/10**self.orderOfMagnitude
if np.absolute(xp) < 1e-8: xp = 0
return self.format % xp
def _formatSciNotation(self, s):
# transform 1e+004 into 1e4, for example
tup = s.split('e')
try:
significand = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
if self._useMathText or self._usetex:
if significand == '1':
# reformat 1x10^y as 10^y
significand = ''
if exponent:
exponent = '10^{%s%s}'%(sign, exponent)
if significand and exponent:
return r'%s{\times}%s'%(significand, exponent)
else:
return r'%s%s'%(significand, exponent)
else:
s = ('%se%s%s' %(significand, sign, exponent)).rstrip('e')
return s
except IndexError, msg:
return s
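# --- Illustrative sketch (not part of the original module) ---
# ScalarFormatter.format_data() can be exercised without an attached axis;
# full tick formatting (set_locs/pprint_val/get_offset) needs an Axis. The
# exact output depends on rcParams: with the default axes.unicode_minus=True,
# fix_minus() replaces the hyphen with u'\u2212'. Helper name is hypothetical.
def _example_scalar_formatter_format_data():
    sf = ScalarFormatter(useOffset=True)
    # roughly u'1.23e-4' (the minus sign may be the unicode minus)
    return sf.format_data(0.000123)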
class LogFormatter(Formatter):
"""
Format values for log axis;
if attribute *decadeOnly* is True, only the decades will be labelled.
"""
def __init__(self, base=10.0, labelOnlyBase = True):
"""
*base* is used to locate the decade tick,
which will be the only one to be labeled if *labelOnlyBase*
        is ``True``
"""
self._base = base+0.0
self.labelOnlyBase=labelOnlyBase
self.decadeOnly = True
def base(self,base):
'change the *base* for labeling - warning: should always match the base used for :class:`LogLocator`'
self._base=base
def label_minor(self,labelOnlyBase):
'switch on/off minor ticks labeling'
self.labelOnlyBase=labelOnlyBase
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
b=self._base
if x == 0.0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
if not isDecade and self.labelOnlyBase: s = ''
elif x>10000: s= '%1.0e'%x
elif x<1: s = '%1.0e'%x
else : s = self.pprint_val(x,d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
def format_data(self,value):
self.labelOnlyBase = False
value = cbook.strip_math(self.__call__(value))
self.labelOnlyBase = True
return value
def format_data_short(self,value):
'return a short formatted string representation of a number'
return '%1.3g'%value
def is_decade(self, x):
n = self.nearest_long(x)
return abs(x-n)<1e-10
def nearest_long(self, x):
if x==0: return 0L
elif x>0: return long(x+0.5)
else: return long(x-0.5)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x)<1e4 and x==int(x): return '%d' % x
if d < 1e-2: fmt = '%1.3e'
elif d < 1e-1: fmt = '%1.3f'
elif d > 1e5: fmt = '%1.1e'
elif d > 10 : fmt = '%1.1f'
elif d > 1 : fmt = '%1.2f'
else: fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup)==2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' %(mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class LogFormatterExponent(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
d = abs(vmax-vmin)
b=self._base
if x == 0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
if not isDecade and self.labelOnlyBase: s = ''
#if 0: pass
elif fx>10000: s= '%1.0e'%fx
#elif x<1: s = '$10^{%d}$'%fx
#elif x<1: s = '10^%d'%fx
elif fx<1: s = '%1.0e'%fx
else : s = self.pprint_val(fx,d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
class LogFormatterMathtext(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
b = self._base
# only label the decades
if x == 0:
return '$0$'
sign = np.sign(x)
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
usetex = rcParams['text.usetex']
if sign == -1:
sign_string = '-'
else:
sign_string = ''
if not isDecade and self.labelOnlyBase: s = ''
elif not isDecade:
if usetex:
s = r'$%s%d^{%.2f}$'% (sign_string, b, fx)
else:
s = '$\mathdefault{%s%d^{%.2f}}$'% (sign_string, b, fx)
else:
if usetex:
s = r'$%s%d^{%d}$'% (sign_string, b, self.nearest_long(fx))
else:
s = r'$\mathdefault{%s%d^{%d}}$'% (sign_string, b, self.nearest_long(fx))
return s
class Locator(TickHelper):
"""
Determine the tick locations;
Note, you should not use the same locator between different :class:`~matplotlib.axis.Axis`
because the locator stores references to the Axis data and view
limits
"""
def __call__(self):
'Return the locations of the ticks'
raise NotImplementedError('Derived must override')
def view_limits(self, vmin, vmax):
"""
select a scale for the range from vmin to vmax
        Normally this will be overridden.
"""
return mtransforms.nonsingular(vmin, vmax)
def autoscale(self):
'autoscale the view limits'
return self.view_limits(*self.axis.get_view_interval())
def pan(self, numsteps):
        'Pan by *numsteps* ticks (can be positive or negative)'
ticks = self()
numticks = len(ticks)
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
if numticks>2:
step = numsteps*abs(ticks[0]-ticks[1])
else:
d = abs(vmax-vmin)
step = numsteps*d/6.
vmin += step
vmax += step
self.axis.set_view_interval(vmin, vmax, ignore=True)
def zoom(self, direction):
"Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
interval = abs(vmax-vmin)
step = 0.1*interval*direction
self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
def refresh(self):
'refresh internal information based on current lim'
pass
class IndexLocator(Locator):
"""
Place a tick on every multiple of some base number of points
plotted, eg on every 5th point. It is assumed that you are doing
index plotting; ie the axis is 0, len(data). This is mainly
useful for x ticks.
"""
def __init__(self, base, offset):
'place ticks on the i-th data points where (i-offset)%base==0'
self._base = base
self.offset = offset
def __call__(self):
'Return the locations of the ticks'
dmin, dmax = self.axis.get_data_interval()
return np.arange(dmin + self.offset, dmax+1, self._base)
class FixedLocator(Locator):
"""
Tick locations are fixed. If nbins is not None,
the array of possible positions will be subsampled to
keep the number of ticks <= nbins +1.
"""
def __init__(self, locs, nbins=None):
self.locs = locs
self.nbins = nbins
if self.nbins is not None:
self.nbins = max(self.nbins, 2)
def __call__(self):
'Return the locations of the ticks'
if self.nbins is None:
return self.locs
step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
return self.locs[::step]
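# --- Illustrative sketch (not part of the original module) ---
# FixedLocator.__call__ does not need an attached axis, so the nbins
# subsampling can be shown directly; the helper name is hypothetical.
def _example_fixed_locator_subsampling():
    loc = FixedLocator([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], nbins=4)
    # step = max(int(0.99 + 10/4.), 1) = 3, so expected: [0, 3, 6, 9]
    return loc()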
class NullLocator(Locator):
"""
No ticks
"""
def __call__(self):
'Return the locations of the ticks'
return []
class LinearLocator(Locator):
"""
Determine the tick locations
The first time this function is called it will try to set the
number of ticks to make a nice tick partitioning. Thereafter the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks = None, presets=None):
"""
        Use *presets* to set the tick locations based on the view limits:
        a dict mapping (vmin, vmax) -> locs.
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
if vmax<vmin:
vmin, vmax = vmax, vmin
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks is None:
self._set_numticks()
if self.numticks==0: return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return ticklocs
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax<vmin:
vmin, vmax = vmax, vmin
if vmin==vmax:
vmin-=1
vmax+=1
exponent, remainder = divmod(math.log10(vmax - vmin), 1)
if remainder < 0.5:
exponent -= 1
scale = 10**(-exponent)
vmin = math.floor(scale*vmin)/scale
vmax = math.ceil(scale*vmax)/scale
return mtransforms.nonsingular(vmin, vmax)
def closeto(x,y):
if abs(x-y)<1e-10: return True
else: return False
class Base:
'this solution has some hacks to deal with floating point inaccuracies'
def __init__(self, base):
assert(base>0)
self._base = base
def lt(self, x):
'return the largest multiple of base < x'
d,m = divmod(x, self._base)
if closeto(m,0) and not closeto(m/self._base,1):
return (d-1)*self._base
return d*self._base
def le(self, x):
'return the largest multiple of base <= x'
d,m = divmod(x, self._base)
if closeto(m/self._base,1): # was closeto(m, self._base)
#looks like floating point error
return (d+1)*self._base
return d*self._base
def gt(self, x):
'return the smallest multiple of base > x'
d,m = divmod(x, self._base)
if closeto(m/self._base,1):
#looks like floating point error
return (d+2)*self._base
return (d+1)*self._base
def ge(self, x):
'return the smallest multiple of base >= x'
d,m = divmod(x, self._base)
if closeto(m,0) and not closeto(m/self._base,1):
return d*self._base
return (d+1)*self._base
def get_base(self):
return self._base
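# --- Illustrative sketch (not part of the original module) ---
# The Base helper rounds to multiples of a (possibly non-integer) base while
# tolerating floating point error; the helper name is hypothetical.
def _example_base_rounding():
    b = Base(0.5)
    # expected: (1.0, 1.5, 0.5, 1.5)
    return b.le(1.3), b.ge(1.3), b.lt(1.0), b.gt(1.0)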
class MultipleLocator(Locator):
"""
    Set a tick on every integer multiple of *base* within the
    view interval
"""
def __init__(self, base=1.0):
self._base = Base(base)
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
if vmax<vmin:
vmin, vmax = vmax, vmin
vmin = self._base.ge(vmin)
base = self._base.get_base()
n = (vmax - vmin + 0.001*base)//base
locs = vmin + np.arange(n+1) * base
return locs
def view_limits(self, dmin, dmax):
"""
Set the view limits to the nearest multiples of base that
contain the data
"""
vmin = self._base.le(dmin)
vmax = self._base.ge(dmax)
if vmin==vmax:
vmin -=1
vmax +=1
return mtransforms.nonsingular(vmin, vmax)
def scale_range(vmin, vmax, n = 1, threshold=100):
dv = abs(vmax - vmin)
maxabsv = max(abs(vmin), abs(vmax))
if maxabsv == 0 or dv/maxabsv < 1e-12:
return 1.0, 0.0
meanv = 0.5*(vmax+vmin)
if abs(meanv)/dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(math.log10(meanv), 1)[0]
offset = 10**ex
else:
ex = divmod(math.log10(-meanv), 1)[0]
offset = -10**ex
ex = divmod(math.log10(dv/n), 1)[0]
scale = 10**ex
return scale, offset
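# --- Illustrative sketch (not part of the original module) ---
# scale_range() picks a decade scale for the span and, when the data sit far
# from zero relative to their span, a decade offset as well. The values noted
# below are approximate and the helper name is hypothetical.
def _example_scale_range():
    # span 0..1 with n=1: no offset -> (1.0, 0)
    near_zero = scale_range(0.0, 1.0, n=1)
    # a narrow span far from zero: roughly (0.1, 100000.0)
    far_from_zero = scale_range(100000.5, 100000.6, n=1)
    return near_zero, far_from_zero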
class MaxNLocator(Locator):
"""
Select no more than N intervals at nice locations.
"""
def __init__(self, nbins = 10, steps = None,
trim = True,
integer=False,
symmetric=False):
self._nbins = int(nbins)
self._trim = trim
self._integer = integer
self._symmetric = symmetric
if steps is None:
self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
if integer:
self._steps = [n for n in self._steps if divmod(n,1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin -= offset
vmax -= offset
raw_step = (vmax-vmin)/nbins
scaled_raw_step = raw_step/scale
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step*divmod(vmin, step)[0]
best_vmax = best_vmin + step*nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins+1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
return self.bin_boundaries(vmin, vmax)
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander = 0.05)
return np.take(self.bin_boundaries(dmin, dmax), [0,-1])
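# --- Illustrative sketch (not part of the original module) ---
# MaxNLocator.bin_boundaries() needs no attached axis, so the 'nice' step
# selection can be shown directly; the helper name is hypothetical.
def _example_maxn_locator_boundaries():
    loc = MaxNLocator(nbins=5)
    # expected: array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ,  1.25])
    return loc.bin_boundaries(0.0, 1.23)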
def decade_down(x, base=10):
'floor x to the nearest lower decade'
lx = math.floor(math.log(x)/math.log(base))
return base**lx
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
lx = math.ceil(math.log(x)/math.log(base))
return base**lx
def is_decade(x,base=10):
lx = math.log(x)/math.log(base)
return lx==int(lx)
class LogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=[1.0]):
"""
place ticks on the location= base**i*subs[j]
"""
self.base(base)
self.subs(subs)
self.numticks = 15
def base(self,base):
"""
        set the base of the log scaling (major tick every base**i, i integer)
"""
self._base=base+0.0
def subs(self,subs):
"""
        set the minor ticks of the log scaling at every base**i*subs[j]
"""
if subs is None:
self._subs = None # autosub
else:
self._subs = np.asarray(subs)+0.0
def _set_numticks(self):
self.numticks = 15 # todo; be smart here; this is just for dev
def __call__(self):
'Return the locations of the ticks'
b=self._base
vmin, vmax = self.axis.get_view_interval()
if vmin <= 0.0:
vmin = self.axis.get_minpos()
if vmin <= 0.0:
raise ValueError(
"Data has no positive values, and therefore can not be log-scaled.")
vmin = math.log(vmin)/math.log(b)
vmax = math.log(vmax)/math.log(b)
if vmax<vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax)-math.ceil(vmin)
if self._subs is None: # autosub
if numdec>10: subs = np.array([1.0])
elif numdec>6: subs = np.arange(2.0, b, 2.0)
else: subs = np.arange(2.0, b)
else:
subs = self._subs
stride = 1
while numdec/stride+1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin),
math.ceil(vmax)+stride, stride)
        if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = []
for decadeStart in b**decades:
ticklocs.extend( subs*decadeStart )
else:
ticklocs = b**decades
return np.array(ticklocs)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax<vmin:
vmin, vmax = vmax, vmin
minpos = self.axis.get_minpos()
if minpos<=0:
raise ValueError(
"Data has no positive values, and therefore can not be log-scaled.")
if vmin <= minpos:
vmin = minpos
if not is_decade(vmin,self._base): vmin = decade_down(vmin,self._base)
if not is_decade(vmax,self._base): vmax = decade_up(vmax,self._base)
if vmin==vmax:
vmin = decade_down(vmin,self._base)
vmax = decade_up(vmax,self._base)
result = mtransforms.nonsingular(vmin, vmax)
return result
class SymmetricalLogLocator(Locator):
"""
    Determine the tick locations for symmetrical log axes
"""
def __init__(self, transform, subs=[1.0]):
"""
place ticks on the location= base**i*subs[j]
"""
self._transform = transform
self._subs = subs
self.numticks = 15
def _set_numticks(self):
self.numticks = 15 # todo; be smart here; this is just for dev
def __call__(self):
'Return the locations of the ticks'
b = self._transform.base
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = self._transform.transform((vmin, vmax))
if vmax<vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax)-math.ceil(vmin)
if self._subs is None:
if numdec>10: subs = np.array([1.0])
elif numdec>6: subs = np.arange(2.0, b, 2.0)
else: subs = np.arange(2.0, b)
else:
subs = np.asarray(self._subs)
stride = 1
while numdec/stride+1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin), math.ceil(vmax)+stride, stride)
if len(subs) > 1 or subs[0] != 1.0:
ticklocs = []
for decade in decades:
ticklocs.extend(subs * (np.sign(decade) * b ** np.abs(decade)))
else:
ticklocs = np.sign(decades) * b ** np.abs(decades)
return np.array(ticklocs)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._transform.base
if vmax<vmin:
vmin, vmax = vmax, vmin
if not is_decade(abs(vmin), b):
if vmin < 0:
vmin = -decade_up(-vmin, b)
else:
vmin = decade_down(vmin, b)
if not is_decade(abs(vmax), b):
if vmax < 0:
vmax = -decade_down(-vmax, b)
else:
vmax = decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -decade_up(-vmin, b)
vmax = -decade_down(-vmax, b)
else:
vmin = decade_down(vmin, b)
vmax = decade_up(vmax, b)
result = mtransforms.nonsingular(vmin, vmax)
return result
class AutoLocator(MaxNLocator):
def __init__(self):
MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10])
class OldAutoLocator(Locator):
"""
On autoscale this class picks the best MultipleLocator to set the
view limits and the tick locs.
"""
def __init__(self):
self._locator = LinearLocator()
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def refresh(self):
'refresh internal information based on current lim'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
d = abs(vmax-vmin)
self._locator = self.get_locator(d)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
d = abs(vmax-vmin)
self._locator = self.get_locator(d)
return self._locator.view_limits(vmin, vmax)
def get_locator(self, d):
'pick the best locator based on a distance'
d = abs(d)
if d<=0:
locator = MultipleLocator(0.2)
else:
try: ld = math.log10(d)
except OverflowError:
raise RuntimeError('AutoLocator illegal data interval range')
fld = math.floor(ld)
base = 10**fld
#if ld==fld: base = 10**(fld-1)
#else: base = 10**fld
if d >= 5*base : ticksize = base
elif d >= 2*base : ticksize = base/2.0
else : ticksize = base/5.0
locator = MultipleLocator(ticksize)
return locator
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
'ScalarFormatter', 'LogFormatter', 'LogFormatterExponent',
'LogFormatterMathtext', 'Locator', 'IndexLocator',
'FixedLocator', 'NullLocator', 'LinearLocator',
'LogLocator', 'AutoLocator', 'MultipleLocator',
'MaxNLocator', )
| agpl-3.0 |
rrohan/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = np.floor(n_samples * missing_rate)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
mayblue9/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
murven/malmo | Malmo/samples/Python_examples/render_speed_test.py | 3 | 8837 | # ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
# Tests overclocking the render speed by running a very simple task at a series of different frame sizes.
import MalmoPython
import os
import random
import sys
import time
import json
import errno
from timeit import default_timer as timer
def GetMissionXML( width, height, prioritiseOffscreen ):
return '''<?xml version="1.0" encoding="UTF-8" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Let's run! Size: ''' + width + ''' x ''' + height + '''</Summary>
</About>
<ModSettings>
<PrioritiseOffscreenRendering>''' + prioritiseOffscreen + '''</PrioritiseOffscreenRendering>
</ModSettings>
<ServerSection>
<ServerInitialConditions>
<AllowSpawning>false</AllowSpawning>
<Time>
<StartTime>1000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime>
</Time>
<Weather>clear</Weather>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,220*1,5*3,121;3;biome_1" />
<DrawingDecorator>
<DrawCuboid x1="0" y1="226" z1="0" x2="0" y2="226" z2="1000" type="stone" variant="smooth_granite"/>
<DrawBlock x="0" y="226" z="130" type="emerald_block"/>
</DrawingDecorator>
<ServerQuitFromTimeUp timeLimitMs="''' + str(MISSION_LENGTH * 1000) + '''"/>
<ServerQuitWhenAnyAgentFinishes />
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>Picasso</Name>
<AgentStart>
<Placement x="0.5" y="227.0" z="0.5"/>
</AgentStart>
<AgentHandlers>
<ContinuousMovementCommands turnSpeedDegs="240"/>
<ObservationFromDistance>
<Marker name="Start" x="0.5" y="227.0" z="0.5"/>
</ObservationFromDistance>
<AgentQuitFromTouchingBlockType>
<Block type="redstone_block" />
</AgentQuitFromTouchingBlockType>
<VideoProducer>
<Width>''' + width + '''</Width>
<Height>''' + height + '''</Height>
</VideoProducer>
</AgentHandlers>
</AgentSection>
</Mission>'''
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
agent_host = MalmoPython.AgentHost()
try:
agent_host.parse( sys.argv )
except RuntimeError as e:
print 'ERROR:',e
print agent_host.getUsage()
exit(1)
if agent_host.receivedArgument("help"):
print agent_host.getUsage()
exit(0)
if agent_host.receivedArgument("test"):
MISSION_LENGTH=5
SHOW_PLOT=False
else:
MISSION_LENGTH=10
SHOW_PLOT=True
if SHOW_PLOT:
import matplotlib
import numpy
import pylab
validate = True
sizes = [(1920,1200), (1280, 920), (1024,768), (860,480), (640,256), (400,400), (400,300), (432,240), (320,240), (256,256), (224,144), (84,84), (80,80), (80,60)]
num_pixels=[]
fps_offscreen=[]
fps_onscreen=[]
datarate_offscreen=[]
datarate_onscreen=[]
agent_host.setObservationsPolicy(MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY)
recordingsDirectory="Render_Speed_Test_Recordings"
try:
os.makedirs(recordingsDirectory)
except OSError as exception:
if exception.errno != errno.EEXIST: # ignore error if already existed
raise
print "WELCOME TO THE RENDER SPEED TEST"
print "================================"
print "This will run the same simple mission with " + str(len(sizes)) + " different frame sizes."
for iRepeat in range(len(sizes) * 2):
prioritiseOffscreen = "true" if iRepeat % 2 else "false"
width,height = sizes[iRepeat/2]
if iRepeat % 2:
num_pixels.append(width*height)
my_mission = MalmoPython.MissionSpec(GetMissionXML(str(width), str(height), prioritiseOffscreen), validate)
# Set up a recording - MUST be done once for each mission - don't do this outside the loop!
my_mission_record = MalmoPython.MissionRecordSpec(recordingsDirectory + "//RenderSpeed_Test" + str(iRepeat) + ".tgz");
my_mission_record.recordRewards()
my_mission_record.recordObservations()
my_mission_record.recordMP4(120,1200000) # Attempt to record at 120fps
max_retries = 3
for retry in range(max_retries):
try:
agent_host.startMission( my_mission, my_mission_record )
break
except RuntimeError as e:
if retry == max_retries - 1:
print "Error starting mission:",e
exit(1)
else:
time.sleep(2)
world_state = agent_host.getWorldState()
while not world_state.is_mission_running:
time.sleep(0.1)
world_state = agent_host.getWorldState()
if len(world_state.errors):
print
for error in world_state.errors:
print "Error:",error.text
exit()
print
# main loop:
agent_host.sendCommand("move 1") # just go forwards, max speed.
numFrames=0
start = timer()
while world_state.is_mission_running:
world_state = agent_host.getWorldState()
if world_state.number_of_video_frames_since_last_state > 0:
numFrames = numFrames + world_state.number_of_video_frames_since_last_state
end = timer()
missionTimeMs = (end - start) * 1000
dataShifted = (width * height * 3 * numFrames) / (1024*1024)
averagefps = numFrames * 1000 / missionTimeMs
datarate = dataShifted * 1000 / missionTimeMs
print "==============================================================================================="
print "Result of test " + str(iRepeat + 1) + ":"
print "==============================================================================================="
print "Frame size: " + str(width) + " x " + str(height)
print "Prioritising offscreen rendering: " + prioritiseOffscreen
print "Frames received: " + str(numFrames)
print "Average fps: " + "{0:.2f}".format(averagefps)
print "Frame data transferred: " + "{0:.2f}".format(dataShifted) + "MB"
print "Data transfer rate: " + "{0:.2f}".format(datarate) + "MB/s"
print "==============================================================================================="
print
if iRepeat % 2:
fps_offscreen.append(averagefps)
datarate_offscreen.append(datarate)
else:
fps_onscreen.append(averagefps)
datarate_onscreen.append(datarate)
time.sleep(0.5) # Give mod a little time to get back to dormant state.
if SHOW_PLOT:
# Now plot some graphs:
plot_fpsoff = pylab.plot(num_pixels, fps_offscreen, 'r', label='render speed (no onscreen updates)')
plot_fpson = pylab.plot(num_pixels, fps_onscreen, 'g', label='render speed (with onscreen updates)')
plot_dataoff = pylab.plot(num_pixels, datarate_offscreen, 'b', label='data transfer speed (no onscreen updates)')
plot_dataon = pylab.plot(num_pixels, datarate_onscreen, 'y', label='data transfer speed (with onscreen updates)')
pylab.xlabel("Frame size (pixels)")
pylab.ylabel("MB/s or frames/s")
pylab.legend()
pylab.title("Plot of render and data-transfer speeds for varying frame sizes, with and without onscreen rendering")
pylab.show()
| mit |
shusenl/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting
        # the number of permutations. However, it works with sampling
        # algorithms that do not provide a random permutation of the subset
        # of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
Amndeep7/DrexelCSDepartmentAnalysisTool | pie.py | 1 | 2446 | from matplotlib.pylab import figure, axes, pie, title, savefig, clf
import argparse
import csv
import sys
# To run from command line
# python pie.py CSV_FileName Name_Of_Grade Minimum_Threshold Maximum_Threshold
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('results')
parser.add_argument('grade_column')
parser.add_argument('lower_threshold', type=float)
parser.add_argument('higher_threshold', type=float)
return parser.parse_args()
# Read CSV to define dictionaries for total students and students that met the threshold within each major
def read_csv(filename):
total_students = dict()
met_students = dict()
with open(filename, newline='', encoding='utf-8') as csvfile:
inputcsv = csv.DictReader(csvfile)
for row in inputcsv:
total_students[row['Major']] = int(row['Total Students'])
met_students[row['Major']] = int(row['Met Threshold'])
return total_students, met_students
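# Expected CSV layout (illustrative values only) -- read_csv() above relies
# on the column names 'Major', 'Total Students' and 'Met Threshold':
#   Major,Total Students,Met Threshold
#   CS,120,85
#   SE,45,30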
def create_piecharts(grade, total_students, met_students, min_threshold, max_threshold):
for major, total in total_students.items():
# Make a square figure and axes
figure(1, figsize=(6,6))
ax = axes([0.1, 0.1, 0.8, 0.8])
# The slices will be ordered and plotted counter-clockwise.
labels = 'Met threshold', 'Did not meet threshold'
fracs = [met_students[major]/total, (total-met_students[major])/total]
# Pop the slice of the pie out for emphasis
explode=(0.1, 0)
pie(fracs, explode=explode, labels=labels,
autopct='%1.1f%%', shadow=True, startangle=90)
title('%s Students Between %s-%s for Assignment %s' % (major, min_threshold, max_threshold, grade),
bbox={'facecolor':'0.8', 'pad':5})
# Save the figure as a png
savefig("piechart_%s_%s_%s-%s.png" % (grade, major, min_threshold, max_threshold), bbox_inches="tight")
clf()
def main():
#System arguments should be as follows
# sys.argv[1] = Filename of CSV generated from range.py
# sys.argv[2] = Name of Grade
# sys.argv[3] = Minimum Threshold value
# sys.argv[4] = Maximum Threshold value
args = get_arguments()
total_students, met_students = read_csv(args.results)
create_piecharts(args.grade_column, total_students, met_students, args.lower_threshold, args.higher_threshold)
if __name__ == "__main__":
main()
| mit |
RhysU/suzerain | postproc/plot_stats.py | 1 | 15061 | #!/usr/bin/env python
"""Usage: plot_stats.py HDF5FILE
Plot wall-normal profiles averaged over (X,Z) directions from HDF5FILE.
Options:
-f --file_ext Output file extension. Default is 'eps'.
-h --help This help message.
--plot_all Generate secondary 'debug' type of plots.
"""
# TODO: Add ability to plot things other than bar_rho.
import sys
import getopt
import h5py
import numpy as np
import gb
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
from scipy.interpolate import interp1d
def plot(hdf5file, fileext, ifile, plot_all):
print "Plotting", hdf5file
# Load a stats file
f = h5py.File(hdf5file,'r')
# Grab number of collocation points and B-spline order
Ny=f['Ny'].value[0]
k=f['k'].value[0]
# Grab collocation points
y = f['collocation_points_y'].value
# Grab number of species
Ns=f['antioch_constitutive_data'].attrs['Ns'][0]
# Grab species names
sname= np.chararray(Ns, itemsize=5)
for s in xrange(0,Ns):
sname[s]=f['antioch_constitutive_data'].attrs['Species_'+str(s)]
# Get "mass" matrix and convert to dense format
D0T_gb = f['Dy0T'].value
D0T = gb.gb2ge(D0T_gb, Ny, k-2)
# Get "mass" matrix and convert to dense format
D1T_gb = f['Dy1T'].value
D1T = gb.gb2ge(D1T_gb, Ny, k-2)
# Get "mass" matrix and convert to dense format
D2T_gb = f['Dy2T'].value
D2T = gb.gb2ge(D2T_gb, Ny, k-2)
# Grab rho coefficients
rho_coeff = f['bar_rho'].value
rho_coeff = np.array(rho_coeff).reshape(Ny,1)
# Grab rho_u coefficients
rho_u_coeff = f['bar_rho_u'].value
rho_u_coeff = np.array(rho_u_coeff).transpose().reshape(Ny,3)
# Grab rho_E coefficients
rho_E_coeff = f['bar_rho_E'].value
rho_E_coeff = np.array(rho_E_coeff).reshape(Ny,1)
# Grab T coefficients
T_coeff = f['bar_T'].value
T_coeff = np.array(T_coeff).reshape(Ny,1)
    # Grab T*T coefficients
T_T_coeff = f['bar_T_T'].value
T_T_coeff = np.array(T_T_coeff).reshape(Ny,1)
# Grab rho_u_u coefficients
rho_u_u_coeff = f['bar_rho_u_u'].value
rho_u_u_coeff = np.array(rho_u_u_coeff).transpose().reshape(Ny,6)
# Grab rho_s coefficients
rho_s_coeff = f['bar_rho_s'].value
rho_s_coeff = np.array(rho_s_coeff).transpose().reshape(Ny,Ns)
# Grab reaction rate coefficients
om_s_coeff = f['bar_om_s'].value
om_s_coeff = np.array(om_s_coeff).transpose().reshape(Ny,Ns)
# Grab p coefficients
p_coeff = f['bar_p'].value
p_coeff = np.array(p_coeff).reshape(Ny,1)
# Grab a coefficients
a_coeff = f['bar_a'].value
a_coeff = np.array(a_coeff).reshape(Ny,1)
    # Grid delta y, collocation points
dy = np.diff(y)
dy = np.append(dy,dy[Ny-2])
dy = np.array(dy).reshape(Ny,1)
# Grab mu coefficients
mu_coeff = f['bar_mu'].value
mu_coeff = np.array(mu_coeff).reshape(Ny,1)
# Grab nu coefficients
nu_coeff = f['bar_nu'].value
nu_coeff = np.array(nu_coeff).reshape(Ny,1)
# Grab breakpoints
yb = f['breakpoints_y'].value
# Grid delta y, breakpoints
dyb = np.diff(yb)
dyb = np.append(dyb,dyb[Ny-6])
dyb = np.array(dyb).reshape(Ny-4,1)
# load baseflow coefficients
base_rho = None
base_rho_u = None
base_rho_v = None
base_rho_w = None
base_rho_E = None
base_p = None
if "largo_baseflow" in f:
if f['largo_baseflow'].attrs['coefficient_base'] == 'polynomial':
baseflow_coeff = f['largo_baseflow'].value
# print 'baseflow coefficients loaded'
npoly = baseflow_coeff.shape[1]
base_rho = np.zeros((Ny,1))
base_rho_u = np.zeros((Ny,1))
base_rho_v = np.zeros((Ny,1))
base_rho_w = np.zeros((Ny,1))
base_rho_E = np.zeros((Ny,1))
base_p = np.zeros((Ny,1))
for i in xrange(0,npoly):
for j in xrange(0, Ny):
y_power_i = np.power(y[j,],i)
base_rho [j,0] += y_power_i * baseflow_coeff[0,i]
base_rho_u[j,0] += y_power_i * baseflow_coeff[1,i]
base_rho_v[j,0] += y_power_i * baseflow_coeff[2,i]
base_rho_w[j,0] += y_power_i * baseflow_coeff[3,i]
base_rho_E[j,0] += y_power_i * baseflow_coeff[4,i]
base_p [j,0] += y_power_i * baseflow_coeff[5,i]
#else:
# skip loading
# print 'baseflow coefficients not polynomial'
# Done getting data
f.close()
D0 = D0T.transpose()
D1 = D1T.transpose()
D2 = D2T.transpose()
# Coefficients -> Collocation points
rho_col = D0*rho_coeff
rho_u_col = D0*rho_u_coeff
rho_E_col = D0*rho_E_coeff
T_col = D0*T_coeff
T_T_col = D0*T_T_coeff
rho_u_u_col = D0*rho_u_u_coeff
rho_s_col = D0*rho_s_coeff
om_s_col = D0*om_s_coeff
mu_col = D0*mu_coeff
nu_col = D0*nu_coeff
p_col = D0*p_coeff
a_col = D0*a_coeff
rho_E_col_y = D1*rho_E_coeff
rho_E_col_yy = D2*rho_E_coeff
p_col_y = D1*p_coeff
p_col_yy = D2*p_coeff
rho_col_y = D1*rho_coeff
rho_col_yy = D2*rho_coeff
# Computed quantities
# - Favre averages
fav_u = np.array(rho_u_col[:,0]/rho_col[:,0]).reshape(Ny,1)
fav_v = np.array(rho_u_col[:,1]/rho_col[:,0]).reshape(Ny,1)
fav_w = np.array(rho_u_col[:,2]/rho_col[:,0]).reshape(Ny,1)
fav_H = np.array((rho_E_col[:,0] + p_col[:,0])/rho_col[:,0]).reshape(Ny,1)
if (plot_all):
# d(d(\fav{H})/dy)/dy
rho_col_2 = np.multiply(rho_col [:,0], rho_col [:,0]).reshape(Ny,1)
rho_col_3 = np.multiply(rho_col_2[:,0], rho_col [:,0]).reshape(Ny,1)
rho_col_y2 = np.multiply(rho_col_y[:,0], rho_col_y[:,0]).reshape(Ny,1)
fav_H_yy = np.array ((rho_E_col_yy[:,0] + p_col_yy[:,0])/rho_col [:,0] ).reshape(Ny,1)
fav_H_yy -= 2.*np.multiply((rho_E_col_y [:,0] + p_col_y [:,0])/rho_col_2[:,0], rho_col_y [:,0]).reshape(Ny,1)
fav_H_yy -= np.multiply((rho_E_col [:,0] + p_col [:,0])/rho_col_2[:,0], rho_col_yy[:,0]).reshape(Ny,1)
fav_H_yy += 2.*np.multiply((rho_E_col [:,0] + p_col [:,0])/rho_col_3[:,0], rho_col_y2[:,0]).reshape(Ny,1)
# - Bar rho_upp
# rho_upp = np.array(np.ravel(rho_col) * np.ravel(fav_u)).reshape(Ny,1)
rho_upp = rho_u_col[:,0] - np.array(np.ravel(rho_col) * np.ravel(fav_u)).reshape(Ny,1)
rho_vpp = rho_u_col[:,1] - np.array(np.ravel(rho_col) * np.ravel(fav_v)).reshape(Ny,1)
rho_wpp = rho_u_col[:,2] - np.array(np.ravel(rho_col) * np.ravel(fav_w)).reshape(Ny,1)
# - Reynolds stresses
R_u_u_col = rho_u_u_col[:,0] - np.multiply(np.multiply(rho_u_col[:,0], rho_u_col[:,0]).reshape(Ny,1), 1/rho_col[:,0])
R_u_u_col += 2.0 * np.array(np.ravel(rho_upp) * np.ravel(fav_u)).reshape(Ny,1)
R_u_v_col = rho_u_u_col[:,1] - np.multiply(np.multiply(rho_u_col[:,0], rho_u_col[:,1]).reshape(Ny,1), 1/rho_col[:,0])
R_u_v_col += np.array(np.ravel(rho_upp) * np.ravel(fav_v)).reshape(Ny,1) + np.array(np.ravel(rho_vpp) * np.ravel(fav_u)).reshape(Ny,1)
R_v_v_col = rho_u_u_col[:,3] - np.multiply(np.multiply(rho_u_col[:,1], rho_u_col[:,1]).reshape(Ny,1), 1/rho_col[:,0])
R_v_v_col += 2.0 * np.array(np.ravel(rho_vpp) * np.ravel(fav_v)).reshape(Ny,1)
R_w_w_col = rho_u_u_col[:,5] - np.multiply(np.multiply(rho_u_col[:,2], rho_u_col[:,2]).reshape(Ny,1), 1/rho_col[:,0])
R_w_w_col += 2.0 * np.array(np.ravel(rho_wpp) * np.ravel(fav_w)).reshape(Ny,1)
    # - Viscous timescale, dy^2/(2*nu)
nub = np.interp(yb,y,np.ravel(nu_col))
nub = np.array(nub).reshape(Ny-4,1)
inv_nub = 1/nub
dssqr_over_2nu = np.multiply(dyb[:,0], dyb[:,0]).reshape(Ny-4,1)
dssqr_over_2nu = np.multiply(dssqr_over_2nu[:,0], 1/nub[:,0]).reshape(Ny-4,1) * 0.5
# - Temperature rms
Tp_Tp = T_T_col - np.multiply(T_col,T_col).reshape(Ny,1)
# Plots
figid = 0
figid += 1
pyplot.figure(figid)
key = "bar_rho_" + str(ifile)
if (ifile == 0 and base_rho is not None):
pyplot.plot(y, base_rho[:,0], linewidth=1)
pyplot.plot(y, rho_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_rho.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "bar_rho_u" + str(ifile)
if (ifile == 0 and base_rho_u is not None):
pyplot.plot(y, base_rho_u[:,0], linewidth=1)
pyplot.plot(y, rho_u_col[:,0], linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_rho_u.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "bar_rho_v" + str(ifile)
if (ifile == 0 and base_rho_v is not None):
pyplot.plot(y, base_rho_v[:,0], linewidth=1)
pyplot.semilogx(y, rho_u_col[:,1], linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_rho_v.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "bar_rho_w" + str(ifile)
if (ifile == 0 and base_rho_w is not None):
pyplot.plot(y, base_rho_w[:,0], linewidth=1)
pyplot.plot(y, rho_u_col[:,2], linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_rho_w.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "bar_rho_E" + str(ifile)
if (ifile == 0 and base_rho_E is not None):
pyplot.plot(y, base_rho_E[:,0], linewidth=1)
pyplot.plot(y, rho_E_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_rho_E.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "bar_T" + str(ifile)
pyplot.semilogx(y, T_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_T.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "sqrt_Tp_Tp" + str(ifile)
pyplot.semilogx(y, np.sqrt(Tp_Tp)/T_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('sqrt_Tp_Tp.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "bar_p" + str(ifile)
if (ifile == 0 and base_p is not None):
pyplot.plot(y, base_p[:,0], linewidth=1)
pyplot.semilogx(y, p_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_p.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "bar_a" + str(ifile)
pyplot.semilogx(y, a_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_a.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "R_u_u" + str(ifile)
pyplot.semilogx(y, R_u_u_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('R_u_u.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "R_u_v" + str(ifile)
pyplot.semilogx(y, R_u_v_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('R_u_v.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "R_v_v" + str(ifile)
pyplot.semilogx(y, R_v_v_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('R_v_v.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "R_w_w" + str(ifile)
pyplot.semilogx(y, R_w_w_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('R_w_w.' + fileext, bbox_inches='tight')
for s in xrange(0,Ns):
figid += 1
pyplot.figure(figid)
key = "rho_" + sname[s] + "_" + str(ifile)
pyplot.semilogx(y, rho_s_col[:,s], linewidth=3, label=key)
pyplot.legend(loc=0)
rho_file = "rho_" + sname[s] + "." + fileext
pyplot.savefig(rho_file, bbox_inches='tight')
for s in xrange(0,Ns):
figid += 1
pyplot.figure(figid)
key = "om_" + sname[s] + "_" + str(ifile)
pyplot.semilogx(y, om_s_col[:,s], linewidth=3, label=key)
pyplot.legend(loc=0)
rho_file = "om_" + sname[s] + "." + fileext
pyplot.savefig(rho_file, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "dy_collocation" + str(ifile)
pyplot.loglog(y, dy, 'o-', linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('dy.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "dy_break" + str(ifile)
pyplot.loglog(yb, dyb, 'o-', linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('dyb.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "bar_mu" + str(ifile)
pyplot.semilogx(y, mu_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_mu.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "bar_nu" + str(ifile)
pyplot.semilogx(y, nu_col, linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('bar_nu.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "dssqr_over_2nu" + str(ifile)
pyplot.loglog(yb, dssqr_over_2nu[:,0], linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('dssqr_over_2nu.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "fav_u" + str(ifile)
pyplot.plot(y, fav_u[:,0], linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('fav_u.' + fileext, bbox_inches='tight')
figid += 1
pyplot.figure(figid)
key = "fav_H" + str(ifile)
pyplot.plot(y, fav_H[:,0], linewidth=3, label=key)
pyplot.legend(loc=0)
pyplot.savefig('fav_H.' + fileext, bbox_inches='tight')
if (plot_all):
figid += 1
pyplot.figure(figid)
key = "fav_H_yy" + str(ifile)
pyplot.plot(y, fav_H_yy[:,0], linewidth=0.1, label=key)
pyplot.axhline(linewidth=0.1, color='r')
pyplot.legend(loc=0)
pyplot.savefig('fav_H_yy.' + fileext, bbox_inches='tight')
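# The code below raises and catches a ``Usage`` exception that is not defined
# anywhere else in this file; a minimal definition consistent with the
# ``err.msg`` access in main() is assumed here.
class Usage(Exception):
    def __init__(self, msg):
        self.msg = msg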
def main(argv=None):
# Permit interactive use
if argv is None:
argv = sys.argv
# File extension (eps is default)
fileext="eps"
    # Plot all stuff, i.e. include the "debug" type plots one may want to see
plot_all = False
# Parse and check incoming command line arguments
try:
try:
opts, args = getopt.getopt(argv[1:], "hf:n", ["help", "file_ext=", "plot_all"])
except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
return 0
if o in ("-f", "--file_ext"):
fileext=a
if o in ("-f", "--plot_all"):
plot_all = True
if len(args) < 1:
print >>sys.stderr, "Incorrect number of arguments. See --help."
return 2
except Usage as err:
print >>sys.stderr, err.msg
return 2
# Process each file in turn
hdf5files = args
# Plot multiple files
ifile = 0
for hdf5file in hdf5files:
plot(hdf5file, fileext, ifile, plot_all)
ifile += 1
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
madjelan/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
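# Illustrative sketch (not part of scikit-learn): the evidence-maximization
# update performed inside BayesianRidge.fit above, written as a standalone
# step.  `eigen_vals` are the squared singular values of X, `coef` the current
# coefficient vector and `rmse` the current sum of squared residuals; all
# names and default values here are illustrative.
def _bayesian_ridge_update_sketch(eigen_vals, coef, rmse, n_samples,
                                  alpha_, lambda_,
                                  alpha_1=1.e-6, alpha_2=1.e-6,
                                  lambda_1=1.e-6, lambda_2=1.e-6):
    # Effective number of well-determined parameters.
    gamma_ = np.sum((alpha_ * eigen_vals) / (lambda_ + alpha_ * eigen_vals))
    # Gamma-prior regularized updates of the weight and noise precisions.
    new_lambda = (gamma_ + 2 * lambda_1) / (np.sum(coef ** 2) + 2 * lambda_2)
    new_alpha = (n_samples - gamma_ + 2 * alpha_1) / (rmse + 2 * alpha_2)
    return new_alpha, new_lambda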
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
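# Illustrative sketch (not part of scikit-learn): the ARD pruning step used in
# the loop above.  Weights whose estimated precision exceeds threshold_lambda
# are treated as irrelevant and clamped to zero.
def _ard_pruning_sketch(lambda_, coef_, threshold_lambda=1.e+4):
    keep_lambda = lambda_ < threshold_lambda
    pruned_coef = coef_.copy()
    pruned_coef[~keep_lambda] = 0
    return pruned_coef, keep_lambda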
| bsd-3-clause |
fmv1992/data_utilities | data_utilities/matplotlib_utilities.py | 1 | 29366 | """Matplotlib utilities for common plotting procedures.
All the functions should follow matplotlib, pandas and numpy's guidelines:
Pandas:
(1) Return a copy of the object; use keyword argument 'inplace' if
changing is to be done inplace.
Numpy:
(1) Use the 'size' or 'shape' interface to determine the size of
arrays.
This module:
(1) Functions should work out of the box whenever possible (for example
for creating dataframes).
(2) Keyword arguments should be turned off by default.
"""
import itertools
import os
import random
import warnings
from data_utilities.pandas_utilities import object_columns_to_category
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import sklearn.preprocessing
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# pylama: ignore=W0611,D301
# pylama: ignore=D406,D407 # TODO: add this too
def scale_axes_axis(axes, scale_xy_axis=False, scale_z_axis=False):
"""Set axes to the same scale.
Arguments:
axes (matplotlib.axes.Axes): input axes to have its axis scaled.
scale_xy_axis (bool): True to scale both x and y axis.
scale_z_axis (bool): True to scale z axis.
Returns:
None: axes are scaled inplace.
Examples:
>>> import matplotlib_utilities as mu
>>> from mpl_toolkits.mplot3d import Axes3D
>>> from mpl_toolkits.mplot3d.art3d import Path3DCollection
>>> x = np.arange(0, 5)
>>> y = x.copy()
>>> xx, yy = np.meshgrid(x, y)
>>> z = xx * yy
>>> fig = plt.figure()
>>> ax = fig.gca(projection='3d')
>>> isinstance(ax.scatter3D(xx, yy, z), Path3DCollection)
True
>>> mu.scale_axes_axis(ax, scale_xy_axis=True, scale_z_axis=False)
>>> fig.tight_layout()
>>> fig.savefig('/tmp/doctest_{0}.png'.format('scale_axes_axis'), \
dpi=500)
"""
    if hasattr(axes, 'get_zlim'):
        dimensions = 3
    else:
        dimensions = 2
xlim = tuple(axes.get_xlim3d())
ylim = tuple(axes.get_ylim3d())
zlim = tuple(axes.get_zlim3d())
xmean = np.mean(xlim)
ymean = np.mean(ylim)
zmean = np.mean(zlim)
# Get the maximum absolute difference between the limits in all 3 axis and
# the mean of the respective axis.
plot_radius = max(abs(lim - mean_)
for lims, mean_ in ((xlim, xmean),
(ylim, ymean),
(zlim, zmean))
for lim in lims)
# Set the span of the axis to be 2 * plot radius for all the plots.
if scale_xy_axis:
axes.set_xlim3d([xmean - plot_radius, xmean + plot_radius])
axes.set_ylim3d([ymean - plot_radius, ymean + plot_radius])
    if dimensions == 3 and scale_z_axis:
axes.set_zlim3d([zmean - plot_radius, zmean + plot_radius])
return None
def plot_3d(series,
colormap_callable=plt.cm.viridis,
include_colorbar=False):
"""Create a 3d-barchart axes for a given 2-level-multi-index series.
Return a 3d axes object given a series with a multiindex with 2
categorical levels.
Arguments:
series (pandas.Series): the 2-level-index series to generate the plot.
colormap_callable (matplotlib.colors.ListedColormap): Colormap object
generated from a list of colors.
include_colorbar (bool): True to include a colorbar.
Returns:
matplotlib.axes.Axes: the 3d axis object.
Examples:
>>> import itertools
>>> import pandas as pd
>>> fig = plt.figure()
>>> s_index = pd.MultiIndex.from_tuples( \
tuple(itertools.product(range(6), list('abc'))), \
names=('x1', 'x2'))
>>> s = pd.Series(data=np.arange(18), index=s_index)
>>> ax = plot_3d(s, include_colorbar=True)
>>> fig.tight_layout()
>>> fig.savefig('/tmp/{0}.png'.format('plot3d'), dpi=500)
"""
# Create a copy of the list to avoid changing the original.
series = series.copy(deep=True)
series.sort_values(inplace=True, ascending=False)
# Set constants.
# Some groupby objects will produce a dataframe. Not nice going over duck
# typing but oh well...
# If it is a dataframe with one column then transform it to series.
if isinstance(series, pd.DataFrame) and series.shape[1] == 1:
        series = series.iloc[:, 0]
# Error handling phase.
# Track if index has correct shape.
if len(series.index.levshape) != 2:
raise ValueError('The index level shape should '
'be 2 and it is {}.'.format(series.index.levshape))
# Check for duplicate indexes.
if series.index.duplicated().sum():
series = series.groupby(level=series.index.names).sum()
if series.index.duplicated().sum():
raise ValueError('series has duplicate values.')
# Handling the index of the received series.
level1_index, level2_index = tuple(zip(*series.index.get_values()))
level1_index = sorted(set(level1_index))
level2_index = sorted(set(level2_index))
# Populate the series with all index combinations, even if they are zero.
all_index_combinations = tuple(itertools.product(
level1_index,
level2_index))
index_names = series.index.names
new_index = pd.MultiIndex.from_tuples(all_index_combinations,
names=index_names)
all_values_series = pd.Series(0, index=new_index, name=series.name)
series = (series + all_values_series).fillna(0)
# Generate the z values
z_list = []
for _, group in series.groupby(level=1):
z_list.append(group)
z_list = np.hstack(z_list).ravel()
z = z_list
del z_list
# Starts manipulating the axes
fig = plt.gcf()
ax = fig.add_subplot(111, projection='3d')
# Create the axis and their labels
xlabels = series.index.get_level_values(index_names[0]).unique().tolist()
ylabels = series.index.get_level_values(index_names[1]).unique().tolist()
xlabels = [''.join(list(filter(str.isalnum, str(value))))
for value in xlabels]
ylabels = [''.join(list(filter(str.isalnum, str(value))))
for value in ylabels]
x = np.arange(len(xlabels))
y = np.arange(len(ylabels))
xlabels = [z.title() for z in xlabels]
ylabels = [z.title() for z in ylabels]
# Adjust tick posistions and labels.
ax.w_xaxis.set_ticks(x + 0.5/2.)
ax.w_yaxis.set_ticks(y + 0.5/2.)
ax.w_yaxis.set_ticklabels(ylabels)
# Color.
pp_color_values = sklearn.preprocessing.minmax_scale(z)
colormap = colormap_callable(pp_color_values)
# Create the 3d plot.
x_mesh, y_mesh = np.meshgrid(x, y, copy=False)
ax.bar3d(x_mesh.ravel(), y_mesh.ravel(), z*0,
dx=0.5, dy=0.5, dz=z,
color=colormap)
# Set convenient z limits.
# From z ticks make it include all extreme values in excess of 0.5 tick.
z_min = z.min()
z_max = z.max()
z_ticks = ax.get_zticks()
z_stride = z_ticks[1] - z_ticks[0]
z_min_lim = z_min - 0.5 * z_stride
z_max_lim = z_max + 0.5 * z_stride
if 0 < z_min_lim:
z_min_lim = 0
elif 0 > z_max_lim:
z_max_lim = 0
ax.set_zlim3d(z_min_lim, z_max_lim)
if include_colorbar:
scalar_mappable = plt.cm.ScalarMappable(cmap=colormap_callable)
scalar_mappable.set_array([min(z), max(z)])
scalar_mappable.set_clim(vmin=min(z), vmax=max(z))
ticks = np.linspace(z_min, z_max, 5)
colorbar = fig.colorbar(scalar_mappable, drawedges=True, ticks=ticks)
# Add border to the colorbar.
colorbar.outline.set_visible(True)
colorbar.outline.set_edgecolor('black')
mpl_params = matplotlib.rc_params()
colorbar.outline.set_linewidth(mpl_params['lines.linewidth'])
# Add ticks to the colorbar.
colorbar.ax.yaxis.set_tick_params(
width=mpl_params['ytick.major.width'],
size=mpl_params['ytick.major.size'])
return ax
def label_containers(axes,
containers=None,
string_formatting=None,
label_height_increment=0.01,
adjust_yaxis_limit=True):
"""Attach text labels to axes.
Arguments:
axes (matplotlib.axes.Axes): Axes in which text labels will be added.
containers (list): List of matplotlib.container.Container objects.
        string_formatting (str): string that will be passed to str.format
function.
label_height_increment (float): height to increment the label to avoid
coincidence with bar's top line.
Returns:
list: list of text objects generated by the axes.text method.
Examples:
>>> import matplotlib.pyplot as plt
>>> from matplotlib_utilities import label_containers
>>> fig = plt.figure(num=0, figsize=(12, 9))
>>> ax = fig.add_subplot(1,1,1)
>>> x = range(10)
>>> y = range(10)
>>> isinstance(ax.bar(x, y), matplotlib.container.Container)
True
>>> isinstance(label_containers(ax), list)
True
>>> fig.tight_layout()
>>> fig.savefig('/tmp/{0}.png'.format('label_containers'))
"""
if containers is None:
containers = axes.containers[0]
height = np.fromiter(
(x.get_height() for x in containers), float)
if string_formatting is None:
if np.all(np.equal(np.mod(height, 1), 0)):
string_formatting = '{0:d}'
height = height.astype('int')
else:
string_formatting = '{0:1.1f}'
height_increment = height.max() * label_height_increment
# Adjust y axis limit to avoid text out of chart area.
if adjust_yaxis_limit:
y0, y1 = axes.get_ylim()
y1 += 2 * height_increment
axes.set_ylim(y0, y1)
# Plot the labels.
bar_labels = []
for i, rect in enumerate(containers):
label_height = height[i] + height_increment
text = axes.text(
rect.get_x() + rect.get_width()/2.,
label_height,
string_formatting.format(height[i]),
ha='center', va='bottom')
bar_labels.append(text)
return bar_labels
def histogram_of_categorical(a,
*args,
**sns_distplot_kwargs):
"""Plot a histogram of categorical with sane defauts.
Arguments:
a (pd.Series): Categorical series to create a histogram plot.
Returns:
matplotlib.axes.Axes: the plotted axes.
Examples:
>>> import pandas_utilities as pu
>>> cat_serie = pu.dummy_dataframe().categorical_0
>>> fig = plt.figure()
>>> axes = histogram_of_categorical(cat_serie, kde=False)
>>> isinstance(axes, matplotlib.axes.Axes)
True
>>> fig.savefig('/tmp/doctest_{0}.png'.format( \
'histogram_of_categorical'), dpi=500)
"""
# Create a dictionary of labels from categories.
labels = dict(enumerate(a.cat.categories))
# Pass the arguments to the histogram of integers function.
axes = histogram_of_integers(a.cat.codes, *args, **sns_distplot_kwargs)
# Restore the labels.
new_labels = tuple(map(
lambda x: labels[x] if x in labels.keys() else '',
axes.get_xticks()))
# Set rotation for the ticklabels.
# TODO: only set rotation in one place.
if any(map(lambda x: x > 4, labels)):
rotation = -90
else:
rotation = 0
axes.set_xticklabels(new_labels, rotation=rotation)
return axes
def histogram_of_floats(a,
*args,
**sns_distplot_kwargs):
"""Plot a histogram of floats with sane defauts.
Arguments:
a (pd.Series): Float series to create a histogram plot.
Returns:
matplotlib.axes.Axes: the plotted axes.
Examples:
>>> import pandas_utilities as pu
>>> float_serie = pu.dummy_dataframe().float_0
>>> fig = plt.figure()
>>> axes = histogram_of_floats(float_serie, kde=False)
>>> isinstance(axes, matplotlib.axes.Axes)
True
>>> fig.savefig('/tmp/doctest_{0}.png'.format( \
'histogram_of_floats'), dpi=500)
"""
axes = sns.distplot(
a,
*args,
**sns_distplot_kwargs)
return axes
def histogram_of_integers(a,
*args,
**sns_distplot_kwargs):
"""Plot a histogram of integers with sane defauts.
Arguments:
a (pd.Series): Integer series to create a histogram plot.
Returns:
matplotlib.axes.Axes: the plotted axes.
Examples:
>>> import pandas_utilities as pu
>>> int_serie = pu.dummy_dataframe().int_0
>>> fig = plt.figure()
>>> axes = histogram_of_integers(int_serie, kde=False)
>>> isinstance(axes, matplotlib.axes.Axes)
True
>>> fig.savefig('/tmp/doctest_{0}.png'.format( \
'histogram_of_ints'), dpi=500)
"""
# Data transformation:
if not isinstance(a, pd.Series):
a = pd.Series(a)
# If there are various different integers plot them as float.
THRESHOLD_TO_CONSIDER_FLOAT = 100
unique = np.unique(a).shape[0]
if unique > THRESHOLD_TO_CONSIDER_FLOAT:
return histogram_of_floats(
a,
*args,
**sns_distplot_kwargs)
# Mask values if the range between maximum and minimum is too big.
if a.max() - a.min() > THRESHOLD_TO_CONSIDER_FLOAT:
unique_values = np.sort(a.unique())
mask_values = dict(zip(unique_values, range(len(unique_values))))
a = a.map(mask_values)
# Specify default options for histogram.
if 'hist_kws' not in sns_distplot_kwargs:
sns_distplot_kwargs['hist_kws'] = dict()
hist_kws = sns_distplot_kwargs['hist_kws']
DEFAULT_HIST_KWARGS = {
'align': 'mid',
'rwidth': 0.5}
# Update kwargs to matplotlib histogram which were not specified.
for absent_key in filter(lambda x: x not in
hist_kws.keys(),
DEFAULT_HIST_KWARGS.keys()):
hist_kws[absent_key] = DEFAULT_HIST_KWARGS[absent_key]
xlabels = np.arange(a.min() - 2,
a.max() + 3)
axes = sns.distplot(
a,
bins=xlabels - 0.5,
*args,
**sns_distplot_kwargs)
axes.set_xticks(xlabels)
# If it is the case of having mapped the values.
try:
mask_values
a = a.map({v: k for k, v in mask_values.items()})
xlabels = np.concatenate((
np.arange(a.min() - 2, a.min()),
np.sort(np.unique(a)),
np.arange(a.max() + 1, a.max() + 3)))
except NameError:
pass
# Apply rotation to labels if they are numerous.
# TODO: rotate categorical names if they are too many.
if max(a) >= 100:
rotation = -45
else:
rotation = 0
axes.set_xticklabels(xlabels, rotation=rotation)
return axes
def histogram_of_dataframe(dataframe,
output_path=None,
weights=None,
*args,
**sns_distplot_kwargs):
"""Draw a histogram for each column of the dataframe.
Provide a quick summary of each series in the dataframe:
- Draw a histogram for each column of the dataframe using the seaborn
'distplot' function.
- Create an artist box with some summary statistics:
- max
- min
- average
- nans
- n
The dataframe may contain nans.
This function assumes that the input dataframe has already received
treatment such as outlier treatment.
Arguments:
dataframe (pandas.DataFrame): The dataframe whose columns will be
plotted.
output_path (str): The outputh path to place the plotted histograms.
If None then no file is written.
weights (list): The list of numpy.array weights to weight each of the
histogram entry.
Returns:
tuple: a tuple containing a figure and an axes for each dataframe.
Examples:
>>> from data_utilities import pandas_utilities as pu
>>> from data_utilities import matplotlib_utilities as mu
>>> dummy_df = pu.dummy_dataframe(shape=200)
>>> df_columns = tuple(x for x in dummy_df.columns if 'object_' \
not in x)
>>> dummy_df = dummy_df.loc[:, df_columns]
>>> isinstance(mu.histogram_of_dataframe(dummy_df, '/tmp/'), tuple)
True
"""
# This function assumes that the dataframe has received treatment. If there
# is an object column then raise exceptions. However nan's are welcome as
# they are part of the informative box.
if (dataframe.dtypes == object).sum() > 0:
raise TypeError("Dataframe must not have object columns:\n{0:d}.",
dataframe.dtypes)
n_nulls = dataframe.isnull().sum().sum()
if n_nulls > 0:
warnings.warn("Dataframe has {0:d} null values.".format(n_nulls),
UserWarning)
del n_nulls
list_of_figures = list()
# Iterate over columns.
for i, column in enumerate(dataframe.columns):
fig, axes = plt.subplots(nrows=1, ncols=1)
series = dataframe[column]
# Since numpy dtypes seem not to be organized in a hierarchy of data
# types (eg int8, int32 etc are instances of a int) we resort to a
# string representation of data types.
series_str_dtype = str(series.dtypes)
# .
# ├── categorical (x)
# └── number
# ├── bool
# ├── float
# ├── datetime
# └── int
if series_str_dtype == 'category':
axes = histogram_of_categorical(
series, **sns_distplot_kwargs)
# .
# ├── categorical
# └── number (x)
# ├── bool
# ├── float
# ├── datetime
# └── int
#
# Series with nans cannot be passed to sns.distplot. So this should be
# sent separetely to add_summary_statistics_textbox
elif ('bool' in series_str_dtype or 'int' in series_str_dtype or
'float' in series_str_dtype or 'datetime' in series_str_dtype):
# Null values if passed to seaborn.distplot raise ValueError.
series_not_null = series[~series.isnull()]
# .
# ├── categorical
# └── number
# ├── bool (x)
# ├── float
# ├── datetime
# └── int
if 'bool' in series_str_dtype:
axes = histogram_of_categorical(
series_not_null.astype('category'),
**sns_distplot_kwargs)
# .
# ├── categorical
# └── number
# ├── bool
# ├── float (x)
# ├── datetime
# └── int
if 'float' in series_str_dtype:
axes = histogram_of_floats(
series_not_null,
**sns_distplot_kwargs)
# TODO: improve
# .
# ├── categorical
# └── number
# ├── bool
# ├── float
# ├── datetime (x)
# └── int
if 'datetime' in series_str_dtype:
series_not_null = pd.to_numeric(series_not_null)
series = pd.to_numeric(series) # TODO XXX to fix
# add_summ_tbox on datetime
axes = histogram_of_floats(
series_not_null,
**sns_distplot_kwargs)
# .
# ├── categorical
# └── number
# ├── bool
# ├── float
# ├── datetime
# └── int (x)
if 'int' in series_str_dtype:
axes = histogram_of_integers(
series_not_null,
**sns_distplot_kwargs)
# Add summary statistics for all numeric cases.
text = add_summary_statistics_textbox(series, axes) # noqa
# TODO: having problems displays text boxes for Coursera Capstone
# Project.
# If it is neither a number nor a categorical data type raise error.
else:
raise TypeError("Datatype {0} not covered in this"
" function".format(series_str_dtype))
# Adjust figure size.
PATCHES_LEN = len(axes.patches)
PATCHES_STRIDE = 0.2
FIGSIZE = fig.get_size_inches()
fig.set_size_inches(
FIGSIZE[0] + PATCHES_LEN * PATCHES_STRIDE,
FIGSIZE[1]
)
list_of_figures.append(fig)
# Save the plotting.
if output_path is not None:
fig.tight_layout()
fig.savefig(
os.path.join(
output_path,
'{0}'.format(column)) + '.png',
dpi=300)
plt.close(fig)
return tuple(list_of_figures)
def add_summary_statistics_textbox(series,
axes,
include_mean=True,
include_max=True,
include_min=True,
include_n=True,
include_nans=True,
include_stddevs=True,
include_stddevp=False):
"""Add a summary statistic textbox to your figure.
Arguments:
series (pd.Series): Series to have summary statistics computed on.
axes (matplotlib.axes.Axes): axes which will receive the text.
various inclues (bool): To include or not to include various summary
statistics.
Returns:
matplotlib.text.Text: The drawed text object.
Examples:
>>> import pandas_utilities as pu
>>> serie = pu.dummy_dataframe().int_0
>>> fig = plt.figure()
>>> axes = histogram_of_integers(serie, kde=False)
>>> text = add_summary_statistics_textbox(serie, axes)
>>> fig.savefig('/tmp/doctest_{0}.png'.format( \
'add_summary_statistics_textbox'), dpi=500)
"""
def find_best_placement_for_summary_in_histogram(axes):
"""Find the best placement for summary in histogram.
Arguments:
axes (matplotlib.axes.Axes): histogram axes with the patches
properties.
Returns:
tuple: A tuple with the (x, y) coordinates for box placement.
"""
# Find best position for the text box.
#
        # This section of the code finds the best placement of the text box.
        # It works by finding a sequence of patches that are either above
        # half the y axis or below it. If it finds such a sequence then it
        # places the box halfway of the first patch of this sequence. This
        # minimizes the chances of having it placed in an unsuitable position.
n_bins = len(axes.patches)
stride = axes.patches[0].get_width()
hist_xlim = (axes.patches[0].get_x(), axes.patches[0].get_x() + n_bins
* stride)
x0 = hist_xlim[0]
y_half = axes.get_ylim()[1] / 2
fraction_of_patches_to_halt = 1/4
contiguous_patches_to_halt = int(n_bins * fraction_of_patches_to_halt)
patches_height = (x.get_height() for x in axes.patches)
height_greater_than_half = map(lambda x: x > y_half, patches_height)
state = height_greater_than_half.__next__()
seq = 1
flipped_on = 1
for i, greater in enumerate(height_greater_than_half, 1):
if greater == state:
seq += 1
else:
seq = 1
state = greater
flipped_on = i
if seq >= contiguous_patches_to_halt:
if greater:
y_placement = 0.3 # as a fraction of the ylimits.
else:
y_placement = 0.95 # as a fraction of the ylimits.
# Place the box on the best place: half stride in the patch
# which happened to 'flip' (y_half_greater -> y_half_smaller or
# vice versa).
x_placement = ((i - contiguous_patches_to_halt + flipped_on) *
stride + x0 + 0.5 * stride)
break
else:
# TODO: not elegant at all.
try:
x_placement
except NameError:
x_placement = 0.05
try:
y_placement
except NameError:
y_placement = 0.95
axes_ylim = axes.get_ylim()
# Correct the placement of the box to absolute units.
y_placement = axes_ylim[0] + y_placement * (axes_ylim[1] -
axes_ylim[0])
return (x_placement, y_placement)
if not isinstance(axes, matplotlib.axes.Axes):
axes = plt.gca()
mean = series.mean()
summary_max = series.max()
summary_min = series.min()
n = series.shape[0]
nans = series.isnull().sum()
stddevs = series.std(ddof=1)
stddevp = series.std(ddof=0)
# Numbers and figures should have the same order of magnitude.
# That is, avoid:
# mean: 1e7
# std: 1e5
# max: 1e9
metrics = np.fromiter(
(x for x in
(mean, summary_max, summary_min, stddevs, stddevp)
if x != 0), # Removes the divide by zero warning.
dtype=float)
if len(metrics) != 0:
min_order = np.floor(np.log10(np.abs(metrics))).min()
if abs(min_order) == float('inf'):
min_order = 0
else:
min_order = 0
min_order = np.int(min_order)
expo = 10 ** min_order
# Float.
text_mean = ('mean = {0:1.2f}' + 'e{1:d}').format(mean/expo, min_order)
text_max = ('max = {0:1.2f}' + 'e{1:d}').format(summary_max/expo,
min_order)
text_min = ('min = {0:1.2f}' + 'e{1:d}').format(summary_min/expo,
min_order)
text_stddevp = ('stddevp = {0:1.2f}' + 'e{1:d}').format(stddevp/expo,
min_order)
# Integers.
text_n = 'n = {0:d}'.format(n)
text_nans = 'nans = {0:d} ({1:1.1%} of n)'.format(nans, nans/n)
text = (text_mean, text_max, text_min, text_n, text_nans, text_stddevp)
if axes.patches:
x_placement, y_placement = \
find_best_placement_for_summary_in_histogram(axes)
else:
offset = .1
x_placement, y_placement = offset, 1 - offset
# Set the box style for the text.
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# Place the text.
text = axes.text(
x_placement, y_placement,
'\n'.join(text),
verticalalignment='top',
alpha=0.5,
transform=axes.transAxes, # make it relative to axes coords.
bbox=props)
return text
def list_usable_backends():
"""List all usable backends for matplotlib.
Arguments:
(empty)
Returns:
list: a list of usable backends for current environment.
Examples:
>>> import matplotlib_utilities as mu
>>> available_backends = mu.list_usable_backends()
>>> 'agg' in available_backends
True
"""
backend_string = ("import matplotlib; matplotlib.use(\"{0}\");"
"import matplotlib.pyplot as plt")
command_string = 'python3 -c \'{0}\' 2>/dev/null'
usable_backends = []
for backend in matplotlib.rcsetup.all_backends:
backend_call = backend_string.format(backend)
command_call = command_string.format(backend_call)
return_value = os.system(command_call)
if return_value == 0:
usable_backends.append(backend)
return usable_backends
color = {
'standard': (223, 229, 239),
'gold': (255, 200, 31),
'platinum': (192, 192, 192),
'black': (0, 0, 0),
'pseudo_black': (90, 90, 90),
'business': (119, 202, 141),
}
color = {k: (v[0]/255, v[1]/255, v[2]/255) for k, v in color.items()}
if __name__ == '__main__':
import doctest
doctest.testmod()
# doctest.run_docstring_examples(func, globals())
| gpl-3.0 |
dblalock/bolt | tests/test_encoder.py | 1 | 9581 | #!/usr/bin/env python
from __future__ import print_function
import numpy as np
from sklearn.datasets import load_digits
import timeit
import bolt
# ================================================================ utils
def _dists_sq(X, q):
diffs = X - q
return np.sum(diffs * diffs, axis=-1)
def _dists_l1(X, q):
diffs = np.abs(X - q)
return np.sum(diffs, axis=-1)
def _element_size_bytes(x):
return np.dtype(x.dtype).itemsize
def _corr(x, y):
x, y = x.astype(np.float64), y.astype(np.float64)
x = x.ravel() - np.mean(x)
y = y.ravel() - np.mean(y)
r = np.mean(x * y) / (np.std(x) * np.std(y))
assert -1.00001 <= r <= 1.00001
return r
def _sq_dists_to_vectors(X, queries, rowNorms=None, queryNorms=None):
Q = queries.shape[0]
mat_size = X.shape[0] * Q
    mat_size_bytes = mat_size * _element_size_bytes(X[0] + queries[0])
if mat_size_bytes > int(1e9):
print("WARNING: _sq_dists_to_vectors: attempting to create a matrix"
"of size {} ({}B)".format(mat_size, mat_size_bytes))
if rowNorms is None:
rowNorms = np.sum(X * X, axis=1, keepdims=True)
if queryNorms is None:
queryNorms = np.sum(queries * queries, axis=1)
dotProds = np.dot(X, queries.T)
return (-2 * dotProds) + rowNorms + queryNorms # len(X) x len(queries)
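def _check_sq_dists_expansion():
    # Small self-check (added for clarity, not in the original test suite):
    # the function above relies on ||x - q||^2 = ||x||^2 - 2 x.q + ||q||^2.
    # The arrays below are made up for the illustration.
    rng = np.random.RandomState(0)
    X_demo = rng.randn(8, 4)
    Q_demo = rng.randn(3, 4)
    expanded = _sq_dists_to_vectors(X_demo, Q_demo)
    direct = np.array([[np.sum((x - q) ** 2) for q in Q_demo] for x in X_demo])
    assert np.allclose(expanded, direct)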
def top_k_idxs(elements, k, smaller_better=True, axis=-1):
if smaller_better: # return indices of lowest elements
which_nn = np.arange(k)
return np.argpartition(elements, kth=which_nn, axis=axis)[:k]
else: # return indices of highest elements
which_nn = (elements.shape[axis] - 1 - np.arange(k))[::-1]
# print "elements.shape", elements.shape
# print "using which_nn: ", which_nn
return np.argpartition(elements, kth=which_nn, axis=axis)[-k:][::-1]
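def _check_top_k_idxs():
    # Tiny illustration (added for clarity, not in the original test suite):
    # np.argpartition places the k-th order statistics in position without a
    # full sort, so the slices above pick out the k best indices cheaply.
    vals = np.array([5., 1., 4., 2., 3.])
    assert set(top_k_idxs(vals, 2, smaller_better=True)) == {1, 3}
    assert set(top_k_idxs(vals, 2, smaller_better=False)) == {0, 2}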
def _knn(X, Q, k=1000, print_every=5, block_sz=128):
nqueries = Q.shape[0]
nblocks = int(np.ceil(nqueries / float(block_sz)))
truth = np.full((nqueries, k), -999, dtype=np.int32)
if nqueries <= block_sz:
dists = _sq_dists_to_vectors(Q, X)
assert dists.shape == (Q.shape[0], X.shape[0])
for i in range(nqueries):
truth[i, :] = top_k_idxs(dists[i, :], k)
return truth
for b in range(nblocks):
# recurse to fill in knn for each block
start = b * block_sz
end = min(start + block_sz, nqueries)
rows = Q[start:end, :]
truth[start:end, :] = _knn(X, rows, k=k, block_sz=block_sz)
if b % print_every == 0:
print("computing top k for query block " \
"{} (queries {}-{})...".format(b, start, end))
assert np.all(truth != -999)
return truth
def _create_randn_encoder(Ntrain=100, Ntest=20, D=64):
enc = bolt.Encoder()
X_train = np.random.randn(Ntrain, D)
X_test = np.random.randn(Ntest, D)
enc.fit(X_train, just_train=True)
enc.set_data(X_test)
return enc
# ================================================================ tests
def test_smoketest():
"""Test that `bolt.Encoder`'s methods don't crash"""
D = 64
enc = _create_randn_encoder(D=D)
Nqueries = 5
Q = np.random.randn(Nqueries, D)
[enc.transform(q) for q in Q]
for k in [1, 3]:
[enc.knn(q, k) for q in Q]
def _fmt_float(x):
return '{}.'.format(int(x)) if x == int(x) else '{:.3f}'.format(x)
def _load_digits_X_Q(nqueries):
X, _ = load_digits(return_X_y=True)
return X[:-nqueries], X[-nqueries:] # X, Q
def test_time_space_savings(): # mostly to verify readme code
np.set_printoptions(formatter={'float_kind': _fmt_float})
nqueries = 20
X, Q = _load_digits_X_Q(nqueries)
enc = bolt.Encoder(accuracy='lowest', reduction=bolt.Reductions.DOT_PRODUCT)
enc.fit(X)
# massive space savings
print("original space usage: {}B".format(X.nbytes)) # 1777 * 64 * 8B = 909KB
print("bolt space usage: {}B".format(enc.nbytes)) # 1777 * 2B = 3.55KB
# massive time savings (~10x here, but often >100x on larger datasets
# with less Python overhead; see the Bolt paper)
t_np = timeit.Timer(lambda: [np.dot(X, q) for q in Q]).timeit(5) # ~8ms
t_bolt = timeit.Timer(lambda: [enc.transform(q) for q in Q]).timeit(5) # ~800us
print("Numpy / BLAS time, Bolt time: {:.3f}ms, {:.3f}ms".format(
t_np * 1000, t_bolt * 1000))
def test_unquantize():
X, Q = _load_digits_X_Q(nqueries=20)
enc = bolt.Encoder('dot', accuracy='high').fit(X)
dots_true = [np.dot(X, q) for q in Q]
dots_bolt = [enc.transform(q, unquantize=True) for q in Q]
diffs = [true_vals - bolt_vals
for true_vals, bolt_vals in zip(dots_true, dots_bolt)]
mse = np.mean([np.mean(diff*diff) for diff in diffs])
var = np.mean([np.var(true_vals) for true_vals in dots_true])
print("dot product unquantize mse / variance: ", mse / var)
assert (mse / var) < .01
# print "true, bolt dot prods"
# print dots_true[0][:20].astype(np.int32)
# print dots_bolt[0][:20].astype(np.int32)
enc = bolt.Encoder('l2', accuracy='high').fit(X)
dists_true = [_dists_sq(X, q) for q in Q]
dists_bolt = [enc.transform(q, unquantize=True) for q in Q]
diffs = [true_vals - bolt_vals
for true_vals, bolt_vals in zip(dists_true, dists_bolt)]
mse = np.mean([np.mean(diff*diff) for diff in diffs])
var = np.mean([np.var(true_vals) for true_vals in dots_true])
print("squared l2 unquantize mse / variance: ", mse / var)
assert (mse / var) < .01
def test_basic():
# np.set_printoptions(precision=3)
np.set_printoptions(formatter={'float_kind': _fmt_float})
nqueries = 20
# nqueries = 10
# nqueries = 3
X, Q = _load_digits_X_Q(nqueries)
# TODO rm this block
# shift = 100.
# shift = 100
# scaleby = 1.
# scaleby = 3.5 # acc goes to **** at accelerating rate as this gets larger...
# scaleby = 4
# scaleby = 1.0
# X, Q = X + shift, Q + shift
# X, Q = X * scaleby, Q * scaleby
# X = X[:200]
# X = X[:50]
# X = X[:20]
# X, _ = load_digits(return_X_y=True)
# Q = X[-nqueries:]
# X = X[:-nqueries]
# print "X.shape", X.shape
# print "X nbytes", X.nbytes
# ------------------------------------------------ squared l2
enc = bolt.Encoder(accuracy='low', reduction=bolt.Reductions.SQUARED_EUCLIDEAN)
enc.fit(X)
l2_corrs = np.empty(len(Q))
for i, q in enumerate(Q):
        l2_true = _dists_sq(X, q).astype(int)
l2_bolt = enc.transform(q)
l2_corrs[i] = _corr(l2_true, l2_bolt)
if i == nqueries - 1:
print("l2 true: ", l2_true)
print("l2 bolt: ", l2_bolt)
print("corr: ", l2_corrs[i])
mean_l2 = np.mean(l2_corrs)
std_l2 = np.std(l2_corrs)
assert mean_l2 > .95
print("--> squared l2 dist correlation: {} +/- {}".format(mean_l2, std_l2))
# return
# ------------------------------------------------ dot product
enc = bolt.Encoder(accuracy='low', reduction=bolt.Reductions.DOT_PRODUCT)
enc.fit(X)
dot_corrs = np.empty(nqueries)
for i, q in enumerate(Q):
dots_true = np.dot(X, q)
dots_bolt = enc.transform(q)
dot_corrs[i] = _corr(dots_true, dots_bolt)
mean_dot = np.mean(dot_corrs)
std_dot = np.std(dot_corrs)
assert mean_dot > .95
print("--> dot product correlation: {} +/- {}".format(mean_dot, std_dot))
# ------------------------------------------------ l2 knn
enc = bolt.Encoder(accuracy='low', reduction='l2')
enc.fit(X)
k_bolt = 10 # tell bolt to search for true knn
k_true = 10 # compute this many true neighbors
true_knn = _knn(X, Q, k_true)
bolt_knn = [enc.knn(q, k_bolt) for q in Q]
    contained = np.empty((nqueries, k_bolt), dtype=bool)
for i in range(nqueries):
true_neighbors = true_knn[i]
bolt_neighbors = bolt_knn[i]
for j in range(k_bolt):
contained[i, j] = bolt_neighbors[j] in true_neighbors
precision = np.mean(contained)
print("--> l2 knn precision@{}: {}".format(k_bolt, precision))
assert precision > .6
# # print "true_knn, bolt_knn:"
# # print true_knn[:20, :20]
# # print bolt_knn[:20]
# ------------------------------------------------ dot knn
enc = bolt.Encoder(accuracy='low', reduction='dot')
# enc = bolt.Encoder(accuracy='high', reduction='dot')
enc.fit(X)
k_bolt = 10 # tell bolt to search for true knn
k_true = 10 # compute this many true neighbors
true_dists = np.dot(X, Q.T)
# true_dists = [np.dot(X, q) for q in Q]
true_knn = np.empty((nqueries, k_true), dtype=np.int64)
for i in range(nqueries):
true_knn[i, :] = top_k_idxs(
true_dists[:, i], k_true, smaller_better=False)
bolt_knn = [enc.knn(q, k_bolt) for q in Q]
    contained = np.empty((len(Q), k_bolt), dtype=bool)
for i in range(len(Q)):
true_neighbors = true_knn[i]
# bolt_dists = enc.transform(Q[i])
# bolt_neighbors = top_k_idxs(bolt_dists, k_bolt, smaller_better=True)
bolt_neighbors = bolt_knn[i] # TODO uncomment
for j in range(k_bolt):
contained[i, j] = bolt_neighbors[j] in true_neighbors
precision = np.mean(contained)
print("--> max inner product knn precision@{}: {}".format(
k_bolt, precision))
assert precision > .6
# print("true_knn, bolt_knn:")
# print(true_knn[:5])
# print(bolt_knn[:5])
if __name__ == '__main__':
test_basic()
| mpl-2.0 |
Laurae2/LightGBM | examples/python-guide/sklearn_example.py | 4 | 1449 | # coding: utf-8
# pylint: disable = invalid-name, C0111
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
# load or create your dataset
print('Load data...')
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0].values
y_test = df_test[0].values
X_train = df_train.drop(0, axis=1).values
X_test = df_test.drop(0, axis=1).values
print('Start training...')
# train
gbm = lgb.LGBMRegressor(objective='regression',
num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
early_stopping_rounds=5)
print('Start predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
# feature importances
print('Feature importances:', list(gbm.feature_importances_))
# other scikit-learn modules
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
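# Possible follow-up (a sketch, not part of the original example): GridSearchCV
# already refits the best estimator on the full training set, so gbm.predict
# works directly; the explicit refit below just makes the chosen settings
# visible.
best_gbm = lgb.LGBMRegressor(num_leaves=31, **gbm.best_params_)
best_gbm.fit(X_train, y_train)
print('The rmse with the tuned parameters is:',
      mean_squared_error(y_test, best_gbm.predict(X_test)) ** 0.5)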
| mit |
kmike/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 6 | 8149 | import warnings
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding import SpectralEmbedding
from sklearn.manifold.spectral_embedding import _graph_is_connected
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
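def _demo_sign_flip_invariance():
    """Illustration (added for clarity, not part of the original test file):
    spectral embeddings are only defined up to a per-column sign, which is
    why the helper above accepts either sign for each column."""
    A = np.random.RandomState(0).randn(10, 3)
    B = A * np.array([1., -1., 1.])  # flip the sign of the middle column
    assert _check_with_col_sign_flipping(A, B, tol=1e-12)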
def test_spectral_embedding_two_components(seed=36):
"""Test spectral embedding with two components"""
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
# test that we can still import spectral embedding
from sklearn.cluster import spectral_embedding as se_deprecated
with warnings.catch_warnings(record=True) as warning_list:
embedded_depr = se_deprecated(affinity, n_components=1,
random_state=np.random.RandomState(seed))
assert_equal(len(warning_list), 1)
assert_true(_check_with_col_sign_flipping(embedded_coordinate,
embedded_depr, 0.05))
def test_spectral_embedding_precomputed_affinity(seed=36):
"""Test spectral embedding with precomputed kernel"""
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
"""Test spectral embedding with callable affinity"""
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
"""Test spectral embedding with amg solver"""
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
    """Test using a pipeline to do spectral clustering"""
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
"""Test that SpectralClustering fails with an unknown eigensolver"""
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
"""Test that SpectralClustering fails with an unknown affinity type"""
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
"""Test that graph connectivity test works as expected"""
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
| bsd-3-clause |
ricsatjr/mplstereonet | examples/axial_plane.py | 1 | 2063 | """
Illustrates fitting an axial plane to two clusters of dip measurements.
In this case, we're faking it by using Anglier's fault orientation data,
but pretend these were bedding dips in two limbs of a fold instead of fault
orientations.
The steps mimic what you'd do graphically:
1. Find the centers of the two modes of the bedding measurements
2. Fit a girdle to them to find the plunge axis of the fold
3. Find the midpoint along that girdle between the two centers
4. The axial plane will be the girdle that fits the midpoint and plunge
axis of the fold.
"""
import matplotlib.pyplot as plt
import mplstereonet
import parse_angelier_data
# Load data from Angelier, 1979
strike, dip, rake = parse_angelier_data.load()
# Plot the raw data and contour it:
fig, ax = mplstereonet.subplots()
ax.density_contour(strike, dip, rake, measurement='rakes', cmap='gist_earth',
sigma=1.5)
ax.rake(strike, dip, rake, marker='.', color='black')
# Find the two modes
centers = mplstereonet.kmeans(strike, dip, rake, num=2, measurement='rakes')
strike_cent, dip_cent = mplstereonet.geographic2pole(*zip(*centers))
ax.pole(strike_cent, dip_cent, 'ro', ms=12)
# Fit a girdle to the two modes
# The pole of this plane will be the plunge of the fold axis
axis_s, axis_d = mplstereonet.fit_girdle(*zip(*centers), measurement='radians')
ax.plane(axis_s, axis_d, color='green')
ax.pole(axis_s, axis_d, color='green', marker='o', ms=15)
# Now we'll find the midpoint. We could project the centers as rakes on the
# plane we just fit, but it's easier to get their mean vector instead.
mid, _ = mplstereonet.find_mean_vector(*zip(*centers), measurement='radians')
midx, midy = mplstereonet.line(*mid)
# Now let's find the axial plane by fitting another girdle to the midpoint
# and the pole of the plunge axis.
xp, yp = mplstereonet.pole(axis_s, axis_d)
x, y = [xp[0], midx], [yp[0], midy]
axial_s, axial_dip = mplstereonet.fit_girdle(x, y, measurement='radians')
ax.plane(axial_s, axial_dip, color='lightblue', lw=3)
plt.show()
| mit |
cauchycui/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
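# Illustrative sketch (not from the scikit-learn source): for dense X and unit
# sample weights, the value computed above is the penalized negative
# log-likelihood
#     sum_i log(1 + exp(-y_i * (x_i . w + c))) + 0.5 * alpha * ||w||^2
# with labels y_i in {-1, +1}.  A direct, less numerically careful version:
def _naive_logistic_loss(w, X, y, alpha):
    c = 0.
    if w.size == X.shape[1] + 1:
        c, w = w[-1], w[:-1]
    z = np.dot(X, w) + c
    return np.sum(np.log1p(np.exp(-y * z))) + .5 * alpha * np.dot(w, w)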
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
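# An illustrative check (not part of the library) that the Hessian-vector
# product Hs(s) computed above matches a finite difference of the gradient
# along an arbitrary direction s; data, epsilon and names are assumptions.
def _example_check_hessian_vector_product():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = np.where(rng.randn(30) > 0, 1.0, -1.0)
    w = rng.randn(4)              # no intercept term in this sketch
    s = rng.randn(4)              # direction for the Hessian-vector product
    alpha = 1.0
    eps = 1e-6

    _, Hs = _logistic_grad_hess(w, X, y, alpha)

    # Finite-difference approximation: (grad(w + eps * s) - grad(w)) / eps
    _, g_plus = _logistic_loss_and_grad(w + eps * s, X, y, alpha)
    _, g_zero = _logistic_loss_and_grad(w, X, y, alpha)
    approx = (g_plus - g_zero) / eps

    # The two vectors should agree to several decimal places.
    return np.max(np.abs(Hs(s) - approx))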
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
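# A small, illustrative exercise of _multinomial_loss on synthetic data (not
# part of the library): the second return value holds class probabilities, so
# every row should sum to one. The one-hot matrix Y below stands in for the
# output of LabelBinarizer; all names and sizes are assumptions.
def _example_multinomial_loss_probabilities():
    import numpy as np

    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 15, 3, 4
    X = rng.randn(n_samples, n_features)
    labels = rng.randint(n_classes, size=n_samples)
    Y = np.zeros((n_samples, n_classes))
    Y[np.arange(n_samples), labels] = 1.0

    w = rng.randn(n_classes * (n_features + 1))   # includes intercept terms
    sample_weight = np.ones(n_samples)

    loss, p, w_reshaped = _multinomial_loss(w, X, Y, 1.0, sample_weight)
    return np.allclose(p.sum(axis=1), 1.0)        # expected to be True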
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with a OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
# To take care of object dtypes, i.e. 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
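# An illustrative call of the path function above on a toy binary problem
# (not part of the library); the data and the choice of Cs=5 are assumptions.
def _example_logistic_regression_path():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = (X[:, 0] + 0.5 * rng.randn(50) > 0).astype(int)

    # Returns one coefficient vector per C, warm-started along the path;
    # with fit_intercept=True each vector has n_features + 1 entries.
    coefs, Cs = logistic_regression_path(X, y, Cs=5, fit_intercept=True,
                                         solver='lbfgs')
    return len(coefs), Cs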
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
See also
--------
sklearn.linear_model.SGDClassifier
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
# For the binary case, this gets squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
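# An illustrative use of the estimator defined above on a toy binary problem
# (not exercised by the library itself); data and settings are assumptions.
def _example_logistic_regression_usage():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(100, 2)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)

    clf = LogisticRegression(C=1.0, solver='lbfgs')
    clf.fit(X, y)
    proba = clf.predict_proba(X[:5])          # shape (5, 2), rows sum to one
    log_proba = clf.predict_log_proba(X[:5])  # element-wise log of the above
    return proba, log_proba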
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path, i.e. we take the initial coefficients of the
present fit to be the coefficients obtained after convergence in the previous
fit, so it is expected to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores obtained by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that correspond to the best score are taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when the parameter fit_intercept is set to True
and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
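# An illustrative cross-validated fit on a toy problem (not part of the
# library); Cs=5 and cv=3 are arbitrary choices. The selected inverse
# regularization strength ends up in clf.C_ and the per-class score grids
# in clf.scores_.
def _example_logistic_regression_cv_usage():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(120, 3)
    y = (X[:, 0] - X[:, 2] > 0).astype(int)

    clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs')
    clf.fit(X, y)
    return clf.C_, clf.scores_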
| bsd-3-clause |
S0MEC0DE/v1 | setup.py | 1 | 2987 | #! /usr/bin/env python
#
import os
# temporarily redirect the config directory to prevent matplotlib, on import,
# from testing for a writeable directory, which results in a sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
DESCRIPTION = "Somecode Twitter Science and Research Platform"
LONG_DESCRIPTION = """\
SOMECODE is a research platform for serious observation and analysis
of Twitter data. SOMECODE brings together 9 years of unbroken continuity
in developing social media research tools. Previous tools and processes
developed by the contributor team are in daily use by many FORTUNE100
companies and major advertising agencies. SOMECODE is the solution we
always wanted to build, but due to the kinds of restraints commercial
entities have, never got to.
"""
DISTNAME = 'somecode'
MAINTAINER = 'Mikko Kotila'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://botlab.io'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/S0MEC0DE/'
VERSION = '0.9.9'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
install_requires = []
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import numpy
except ImportError:
install_requires.append('numpy')
try:
import seaborn
except ImportError:
install_requires.append('seaborn')
try:
import matplotlib
except ImportError:
install_requires.append('matplotlib')
try:
import pandas
except ImportError:
install_requires.append('pandas')
try:
import nltk
except ImportError:
install_requires.append('nltk')
try:
import tweepy
except ImportError:
install_requires.append('tweepy')
try:
import twython
except ImportError:
install_requires.append('twython')
try:
import IPython
except ImportError:
install_requires.append('IPython')
install_requires.append('python-tk')
return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['somecode'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
)
| mit |
CartoDB/crankshaft | release/python/0.2.0/crankshaft/crankshaft/segmentation/segmentation.py | 15 | 7186 | """
Segmentation creation and prediction
"""
import sklearn
import numpy as np
import plpy
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import metrics
from sklearn.cross_validation import train_test_split
# Lower level functions
#----------------------
def replace_nan_with_mean(array):
"""
Input:
@param array: an array of floats which may have null-valued entries
Output:
array with nans filled in with the mean of the dataset
"""
# returns an array of rows and column indices
indices = np.where(np.isnan(array))
# iterate through entries which have nan values
for row, col in zip(*indices):
array[row, col] = np.mean(array[~np.isnan(array[:, col]), col])
return array
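# Illustrative use of the imputation helper above (not called by the module):
# each NaN is replaced by the mean of the non-NaN entries in its column.
def _example_replace_nan_with_mean():
    import numpy as np

    arr = np.array([[1.0, 2.0],
                    [np.nan, 4.0],
                    [3.0, np.nan]])
    filled = replace_nan_with_mean(arr)
    # Column 0: NaN -> mean(1, 3) = 2.0 ; column 1: NaN -> mean(2, 4) = 3.0
    return filled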
def get_data(variable, feature_columns, query):
"""
Fetch data from the database, clean, and package into
numpy arrays
Input:
@param variable: name of the target variable
@param feature_columns: list of column names
@param query: subquery that data is pulled from for the packaging
Output:
prepared data, packaged into NumPy arrays
"""
columns = ','.join(['array_agg("{col}") As "{col}"'.format(col=col) for col in feature_columns])
try:
data = plpy.execute('''SELECT array_agg("{variable}") As target, {columns} FROM ({query}) As a'''.format(
variable=variable,
columns=columns,
query=query))
except Exception, e:
plpy.error('Failed to access data to build segmentation model: %s' % e)
# extract target data from plpy object
target = np.array(data[0]['target'])
# put n feature data arrays into an n x m array of arrays
features = np.column_stack([np.array(data[0][col], dtype=float) for col in feature_columns])
return replace_nan_with_mean(target), replace_nan_with_mean(features)
# High level interface
# --------------------
def create_and_predict_segment_agg(target, features, target_features, target_ids, model_parameters):
"""
Version of create_and_predict_segment that works on arrays that come straight from the SQL calling
the function.
Input:
@param target: The 1D array of length NSamples containing the target variable we want the model to predict
@param features: The 2D array of size NSamples * NFeatures that forms the input to the model
@param target_ids: A 1D array of target_ids that will be used to associate the results of the prediction with the rows from which they come
@param model_parameters: A dictionary containing parameters for the model.
"""
clean_target = replace_nan_with_mean(target)
clean_features = replace_nan_with_mean(features)
target_features = replace_nan_with_mean(target_features)
model, accuracy = train_model(clean_target, clean_features, model_parameters, 0.2)
prediction = model.predict(target_features)
accuracy_array = [accuracy]*prediction.shape[0]
return zip(target_ids, prediction, np.full(prediction.shape, accuracy_array))
def create_and_predict_segment(query, variable, target_query, model_params):
"""
generate a segment with machine learning
Stuart Lynn
"""
## fetch column names
try:
columns = plpy.execute('SELECT * FROM ({query}) As a LIMIT 1 '.format(query=query))[0].keys()
except Exception, e:
plpy.error('Failed to build segmentation model: %s' % e)
## extract column names to be used in building the segmentation model
feature_columns = set(columns) - set([variable, 'cartodb_id', 'the_geom', 'the_geom_webmercator'])
## get data from database
target, features = get_data(variable, feature_columns, query)
model, accuracy = train_model(target, features, model_params, 0.2)
cartodb_ids, result = predict_segment(model, feature_columns, target_query)
accuracy_array = [accuracy]*result.shape[0]
return zip(cartodb_ids, result, accuracy_array)
def train_model(target, features, model_params, test_split):
"""
Train the Gradient Boosting model on the provided data and calculate the accuracy of the model
Input:
@param target: 1D Array of the variable that the model is to be trained to predict
@param features: 2D Array NSamples * NFeatures to use in training the model
@param model_params: A dictionary of model parameters, the full specification can be found on the
scikit learn page for [GradientBoostingRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html)
@param test_split: The fraction of the data to be withheld for testing the model / calculating the accuracy
"""
features_train, features_test, target_train, target_test = train_test_split(features, target, test_size=test_split)
model = GradientBoostingRegressor(**model_params)
model.fit(features_train, target_train)
accuracy = calculate_model_accuracy(model, features, target)
return model, accuracy
def calculate_model_accuracy(model, features, target):
"""
Calculate the mean squared error of the model prediction
Input:
@param model: model trained from input features
@param features: features to make a prediction from
@param target: target to compare prediction to
Output:
mean squared error of the model prediction compared to the target
"""
prediction = model.predict(features)
return metrics.mean_squared_error(prediction, target)
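# Illustrative, database-free exercise of train_model on synthetic data (not
# called by the module); the parameter values below are assumptions rather
# than recommendations. Note that the returned "accuracy" is a mean squared
# error, so smaller values are better.
def _example_train_model():
    import numpy as np

    np.random.seed(0)
    features = np.random.rand(200, 3)
    target = 2.0 * features[:, 0] + features[:, 1] - features[:, 2]

    model_params = {'n_estimators': 50, 'max_depth': 3, 'learning_rate': 0.1}
    model, accuracy = train_model(target, features, model_params, 0.2)
    return accuracy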
def predict_segment(model, features, target_query):
"""
Use the provided model to predict the values for the new feature set
Input:
@param model: The pretrained model
@param features: A list of features to use in the model prediction (list of column names)
@param target_query: The query to run to obtain the data to predict on and the cartodb_ids associated with it.
"""
batch_size = 1000
joined_features = ','.join(['"{0}"::numeric'.format(a) for a in features])
try:
cursor = plpy.cursor('SELECT Array[{joined_features}] As features FROM ({target_query}) As a'.format(
joined_features=joined_features,
target_query=target_query))
except Exception, e:
plpy.error('Failed to build segmentation model: %s' % e)
results = []
while True:
rows = cursor.fetch(batch_size)
if not rows:
break
batch = np.row_stack([np.array(row['features'], dtype=float) for row in rows])
#Need to fix this. Should be global mean. This will cause weird effects
batch = replace_nan_with_mean(batch)
prediction = model.predict(batch)
results.append(prediction)
try:
cartodb_ids = plpy.execute('''SELECT array_agg(cartodb_id ORDER BY cartodb_id) As cartodb_ids FROM ({0}) As a'''.format(target_query))[0]['cartodb_ids']
except Exception, e:
plpy.error('Failed to build segmentation model: %s' % e)
return cartodb_ids, np.concatenate(results)
| bsd-3-clause |
nvoron23/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts it number of state automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
katiehamren/splash | splash/zspec_information.py | 1 | 5360 | import os
import re
import pdb
import warnings
import numpy as np
import pandas as pd
from astropy.io import fits
from utils import _parse_filename
warnings.filterwarnings('ignore')
zspec_tags = ['Z', 'ZQUAL','ZSNR','ABAND','MJD','AIRM']
def zquality(zspec_path, spec1d_files, tags, serendip_file=None):
'''
Return the values associated with "tags" for the stars in spec1d_files
:param zspec_path: path to zspec files
:param spec1d_files: list/array of spec1d_files
:param tags: list/array of tags to return.
:param serendip_file: file (with path) containing serendip information
:return: A dataframe containing the requested tags, plus Z, ZQUAL, MJD and ABAND, which are needed by later steps
'''
if (zspec_path is None) or not os.path.isdir(zspec_path):
raise AttributeError("zspec path does not exist")
if (type(spec1d_files) != list) & (type(spec1d_files) != np.ndarray):
raise AttributeError('spec1d_files must be a list or array')
elif type(spec1d_files) == list:
spec1d_files = np.array(spec1d_files)
if serendip_file is None:
print 'skipping serendips'
elif serendip_file == 'Default':
if os.path.isfile('%s/SLIT.goodFormat' %(zspec_path)):
serendip_file = '%s/SLIT.goodFormat' %(zspec_path)
print 'using default serendip file'
else:
raise IOError('no default serendip file')
else:
if not os.path.isfile(serendip_file):
raise IOError('input serendip file does not exist')
# Get the masks associated with these spec1d files, so as to go mask by mask
all_masks = np.array([_parse_filename(sf)[0] for sf in spec1d_files])
# Go mask by mask and get a dataframe of all zspec-related tags
df_list = []
for m in all_masks:
zfile = '%s/zspec.%s.fits' %(zspec_path, m)
sfiles = spec1d_files[all_masks == m]
df = read_zspec_fits(zfile, sfiles, serendip_file)
df_list.append(df)
# Concatenate dataframes into a master
full_df = pd.concat(df_list)
# Include all tags that are either included in the desired list OR are necessary later
# Things that will matter later include
# - Z, ZQUAL (for shift & stitch)
# - MJD, ABAND (for kinematics)
return full_df[[c for c in full_df.columns if (c in tags) | (c in ['Z','ZQUAL','MJD','ABAND'])]]
def read_zspec_fits(zfile, spec1d_files, serendip_file):
'''
Given an input zspec file and a list of spec1d_files, return a dataframe containing all possible tags
:param zfile: zspec file
:param spec1d_files: list of spec1d_files associated with the input zfile
:return:
'''
mask = re.split('\.', zfile)[1]
if os.path.isfile(zfile):
hduZ = fits.open(zfile)
zspec = hduZ[1].data
else:
raise ValueError('invalid zfile, does not exist')
df = pd.DataFrame(columns=zspec_tags, index=spec1d_files)
# walk through each spec1d_file one at a time
for sf in spec1d_files:
# find the relevant entry in the zspec file by using the spec1d_file tag (where available)
if 'SPEC1D_FILE' in hduZ[1].columns.names:
charInds = np.core.defchararray.find(zspec.SPEC1D_FILE,sf)
ind = np.argwhere(charInds != -1)
else:
zspec_spec1d_file = np.array(['spec1d.%s.%s.%s.fits.gz' %(mask, s.zfill(3), o)
for s, o in zip(zspec.SLITNAME, zspec.OBJNAME)])
charInds = np.core.defchararray.find(zspec_spec1d_file,sf)
ind = np.argwhere(charInds != -1)
try:
i = ind[0][0]
z = zspec.Z[i]
zqual = zspec.ZQUALITY[i]
snr = zspec.SN[i]
if 'ABAND' in hduZ[1].columns.names:
aband = zspec.ABAND[i]
else:
aband = np.nan
if 'MJD' in hduZ[1].columns.names:
mjd = zspec.MJD[i]
else:
mjd = np.nan
if 'AIRMASS' in hduZ[1].columns.names:
airm = zspec.AIRMASS[i]
else:
airm = np.nan
except IndexError:
# Test if this is a serendip
if serendip_file is None:
airm = mjd = aband = snr = zqual = z = np.nan
else:
z = read_serendip_zqual(sf, serendip_file)
zqual = 3
airm = mjd = aband = snr = np.nan
# This part determines if there are -1s in the zspec file. If there *are*, then zquality = 1 is a
# manual velocity determination. If there *aren't*, then zquality = 1 is crap
if zqual == 1:
zq = zspec.ZQUALITY
if -1 not in zq:
zqual = 0
# Write this star into the df
df.loc[sf] = pd.Series({'Z':z, 'ZQUAL':zqual,'ZSNR':snr,'ABAND':aband,'MJD':mjd,'AIRM':airm})
return df
def read_serendip_zqual(spec1d_file, serendip_file):
'''
Get z value from serendip file
:param spec1d_file:
:param serendip_file:
:return: z
'''
try:
zfile, z = np.loadtxt(serendip_file, usecols = (0,1), dtype = 'S30,f', unpack = True)
except:
raise ValueError('could not read serendip file')
if spec1d_file in zfile:
return z[zfile == spec1d_file]
else:
return np.nan
| mit |
rspavel/spack | var/spack/repos/builtin/packages/py-astropy/package.py | 3 | 4192 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class PyAstropy(PythonPackage):
"""The Astropy Project is a community effort to develop a single core
package for Astronomy in Python and foster interoperability between
Python astronomy packages."""
homepage = 'https://astropy.org/'
url = 'https://pypi.io/packages/source/a/astropy/astropy-4.0.1.post1.tar.gz'
install_time_test_callbacks = ['install_test', 'import_module_test']
version('4.0.1.post1', sha256='5c304a6c1845ca426e7bc319412b0363fccb4928cb4ba59298acd1918eec44b5')
version('3.2.1', sha256='706c0457789c78285e5464a5a336f5f0b058d646d60f4e5f5ba1f7d5bf424b28')
version('2.0.14', sha256='618807068609a4d8aeb403a07624e9984f566adc0dc0f5d6b477c3658f31aeb6')
version('1.1.2', sha256='6f0d84cd7dfb304bb437dda666406a1d42208c16204043bc920308ff8ffdfad1')
version('1.1.post1', sha256='64427ec132620aeb038e4d8df94d6c30df4cc8b1c42a6d8c5b09907a31566a21')
variant('extras', default=False, description='Enable extra functionality')
# Required dependencies
depends_on('[email protected]:', when='@4.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@3.0:', type=('build', 'run'))
depends_on('[email protected]:2.8,3.4:', when='@2.0:', type=('build', 'run'))
depends_on('[email protected]:2.8,3.3:', when='@1.2:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', when='@4.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@3.1:', type=('build', 'run'))
depends_on('[email protected]:', when='@3.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@2.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.2:', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('pkgconfig', type='build')
# Optional dependencies
depends_on('[email protected]:', when='+extras', type=('build', 'run'))
depends_on('py-h5py', when='+extras', type=('build', 'run'))
depends_on('py-beautifulsoup4', when='+extras', type=('build', 'run'))
depends_on('py-html5lib', when='+extras', type=('build', 'run'))
depends_on('py-bleach', when='+extras', type=('build', 'run'))
depends_on('py-pyyaml', when='+extras', type=('build', 'run'))
depends_on('py-pandas', when='+extras', type=('build', 'run'))
depends_on('py-bintrees', when='+extras', type=('build', 'run'))
depends_on('py-sortedcontainers', when='+extras', type=('build', 'run'))
depends_on('py-pytz', when='+extras', type=('build', 'run'))
depends_on('py-jplephem', when='+extras', type=('build', 'run'))
depends_on('[email protected]:', when='+extras', type=('build', 'run'))
depends_on('py-scikit-image', when='+extras', type=('build', 'run'))
depends_on('py-mpmath', when='+extras', type=('build', 'run'))
depends_on('[email protected]:', when='+extras', type=('build', 'run'))
depends_on('py-bottleneck', when='+extras', type=('build', 'run'))
depends_on('py-pytest', when='+extras', type=('build', 'run'))
# System dependencies
depends_on('erfa')
depends_on('wcslib')
depends_on('cfitsio')
depends_on('expat')
def patch(self):
# forces the rebuild of files with cython
# avoids issues with PyCode_New() in newer
# versions of python in the distributed
# cython-ized files
os.remove('astropy/cython_version.py')
def build_args(self, spec, prefix):
args = [
'--use-system-libraries',
'--use-system-erfa',
'--use-system-wcslib',
'--use-system-cfitsio',
'--use-system-expat'
]
if spec.satisfies('^python@3:'):
args.extend(['-j', str(make_jobs)])
return args
def install_test(self):
with working_dir('spack-test', create=True):
python('-c', 'import astropy; astropy.test()')
| lgpl-2.1 |
RPGOne/scikit-learn | sklearn/tests/test_common.py | 5 | 9222 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import re
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, cloneable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield (_named_check(check_parameters_default_constructible, name),
name, Estimator)
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield _named_check(check, name), name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if ('class_weight' in clazz().get_params().keys() and
issubclass(clazz, LinearClassifierMixin))]
for name, Classifier in linear_classifiers:
yield _named_check(check_class_weight_balanced_linear_classifier,
name), name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_all_tests_are_importable():
# Ensure that for each contentful subpackage, there is a test directory
# within it that is also a subpackage (i.e. a directory with __init__.py)
HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
\.externals(\.|$)|
\.tests(\.|$)|
\._
''')
lookup = dict((name, ispkg)
for _, name, ispkg
in pkgutil.walk_packages(sklearn.__path__,
prefix='sklearn.'))
missing_tests = [name for name, ispkg in lookup.items()
if ispkg
and not HAS_TESTS_EXCEPTIONS.search(name)
and name + '.tests' not in lookup]
assert_equal(missing_tests, [],
'{0} do not have `tests` subpackages. Perhaps they require '
'__init__.py or an add_subpackage directive in the parent '
'setup.py'.format(missing_tests))
def test_non_transformer_estimators_n_iter():
# Test that all estimators of type which are non-transformer
# and which have an attribute of max_iter, return the attribute
    # of n_iter at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
with ignore_warnings(category=DeprecationWarning):
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (_named_check(
check_non_transformer_estimators_n_iter, name),
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
with ignore_warnings(category=DeprecationWarning):
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield _named_check(
check_transformer_n_iter, name), name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False,
include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
# If class is deprecated, ignore deprecated warnings
if hasattr(Estimator.__init__, "deprecated_original"):
with ignore_warnings():
yield _named_check(
check_get_params_invariance, name), name, Estimator
else:
yield _named_check(
check_get_params_invariance, name), name, Estimator
| bsd-3-clause |
kaltwang/latenttrees | latenttrees/lt_model.py | 1 | 118932 | import collections, copy, random, warnings, sys, builtins, time, datetime
from itertools import chain
import numpy as np
import networkx as nx
from scipy.stats import gaussian_kde
import matplotlib.pyplot as plt
#from latenttrees.lt_helper import *
from latenttrees.lt_helper import is_message_1to2, calc_lklhd_parent_messages, imshow_values, \
NanSelect, select_max_undecorated, select_weighted_random_undecorated, select_random_undecorated, \
select_random_metropolis_undecorated
from misc.numpy_helper import normalize_convex, is_obj_array, ProgressLine, has_equiv_shape, cut_max, norm_logpdf, \
normalize_convex_log, expand_array, obj_array_get_N
from misc.python_helper import has_elements, isequal_or_none, get_and_set_attr
# profile magic: define @profile decorator on the fly, if not defined by the kernprof script
# see http://stackoverflow.com/questions/18229628/python-profiling-using-line-profiler-clever-way-to-remove-profile-statements
try:
builtins.profile
except AttributeError:
# No line profiler, provide a pass-through version
def profile(func): return func
builtins.profile = profile
class ObjectRoot(object):
def __init__(self):
# type(self).print_enabled = True # static (class) variable
        self.print_enabled = True  # instance attribute (the commented line above would make it a class variable)
def _print(self, str_):
if self.print_enabled:
print(self._print_prefix() + str_)
def _print_prefix(self):
classname = type(self).__name__
timestr = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
prefix = timestr + " [" + classname + "] "
return prefix
class GraphObject(ObjectRoot):
COPY_PREFIX = '_copy_'
def __init__(self):
super(GraphObject, self).__init__()
def clear(self, prop_set):
for p in prop_set:
setattr(self, p, None)
def copy(self, prop_set):
for p in prop_set:
value_copy = copy.deepcopy(getattr(self, p))
setattr(self, GraphObject.COPY_PREFIX + p, value_copy)
def recover_copy(self, prop_set):
for p in prop_set:
value_copy = getattr(self, GraphObject.COPY_PREFIX + p)
setattr(self, p, value_copy)
delattr(self, GraphObject.COPY_PREFIX + p)
class Node(GraphObject):
def __init__(self, k_):
self.k = k_
self.prior = None
self.layer = 0
self.create_new_prior()
def create_new_prior(self):
self.prior = DistribFactory.random(1, self.k)
def set_k(self, k_):
self.k = k_
if self.prior is not None:
self.create_new_prior()
class Edge(GraphObject):
def __init__(self, k1_, k2_):
if k1_ > 1:
self.k1 = k1_
self.k2 = k2_
self.distrib = DistribFactory.random(k1_, k2_)
else:
raise ValueError('k1_={} must be greater than 1 (parents always need to be categorical)!'.format(k1_))
def set_k1(self, k1_):
self.__init__(k1_, self.k2)
def set_k2(self, k2_):
self.__init__(self.k1, k2_)
class NodeEdgeFactory(object):
def __init__(self, properties_=None):
self._instances_created = False
if properties_ is not None:
self._properties = set(properties_) # string list of property names
else:
self._properties = set()
def _init_properties(self, obj):
obj.clear(self._properties)
self._instances_created = True
def register_properties(self, prop_set):
# check first if any element from prop_set is actually new
if not self._properties >= prop_set:
if self._instances_created:
raise RuntimeError('Properties must be added before the first class instantiation!')
self._properties.update(prop_set)
class NodeFactory(NodeEdgeFactory):
def __init__(self, properties_=None):
super(NodeFactory, self).__init__(properties_)
def create_node(self, k):
result = Node(k)
super(NodeFactory, self)._init_properties(result)
return result
class EdgeFactory(NodeEdgeFactory):
def __init__(self, properties_=None):
super(EdgeFactory, self).__init__(properties_)
def create_edge(self, k1, k2):
result = Edge(k1, k2)
super(EdgeFactory, self)._init_properties(result)
return result
class Graph(GraphObject):
OBJ_STR = 'obj' # dictionary key string that is used to access node and edge objects within networkx graph
def __init__(self):
super(Graph, self).__init__()
self.__id_node_next = 0
self.__nxgraph = nx.DiGraph()
self.__node_factory = NodeFactory()
self.__edge_factory = EdgeFactory()
self.__id_roots = set()
self.required_axes_total = 0
self.axes = None
self.figure = None
self.required_axes = 1
self.id_axes = self.register_axes(self.required_axes)
self.print_enabled = False
def get_dof(self, id_node):
dof = 0
prior = self.node(id_node).prior
if prior is not None:
dof += prior.get_dof()
for id_node1, id_node2, edge in self.edges_iter(nbunch=id_node, data=True):
dof_act = edge.distrib.get_dof()
dof += dof_act
return dof
def set_k(self, id_node, k):
node = self.node(id_node)
node.set_k(k)
for id_node1, id_node2, edge in self.edges_iter(nbunch=id_node, data=True):
if id_node1 == id_node:
edge.set_k1(k)
else:
assert id_node2 == id_node
edge.set_k2(k)
def get_axes(self, id_axes):
if self.figure is None or (not plt.fignum_exists(self.figure.number)):
# create figure and axes
self.figure, self.axes = plt.subplots(nrows=1, ncols=self.required_axes_total, squeeze=True, figsize=(20, 6.66))
axes_curr = [self.axes[id] for id in id_axes]
for ax in axes_curr:
ax.cla()
return axes_curr
def register_axes(self, required_axes):
id_axes_new = [x for x in range(self.required_axes_total, self.required_axes_total+required_axes)]
self.required_axes_total += required_axes
return id_axes_new
def get_id_node_next(self):
return self.__id_node_next
def has_node(self, id_node):
return self.__nxgraph.has_node(id_node)
def number_of_nodes(self):
return self.__nxgraph.number_of_nodes()
def has_edge(self, id_node1, id_node2):
return self.__nxgraph.has_edge(id_node1, id_node2)
def has_parent(self, id_node):
edges = self.in_edges_iter(id_node, data=False)
return has_elements(edges)
def get_parent(self, id_node):
id_parents = [x for x in self.in_edges_iter(id_node, data=False)]
num_parents = len(id_parents)
assert num_parents <= 1 # tree structure allows one parent at most
if num_parents == 1:
return id_parents[0]
else:
return None
def get_root(self, id_node):
id_parent = self.get_parent(id_node)
if id_parent is None:
return id_node
else:
return self.get_root(id_parent)
def get_id_roots(self):
return self.__id_roots
def get_num_roots(self):
return len(self.__id_roots)
def degree(self, id_node):
return self.__nxgraph.degree(id_node)
def out_degree(self, id_node):
return self.__nxgraph.out_degree(id_node)
def node(self, id_node):
"""Access node object.
:param id_node: the node id
"""
return self.__nxgraph.node[id_node][Graph.OBJ_STR]
def edge(self, id_node1, id_node2):
"""Access edge object for edge from id_node1 to id_node2.
:param id_node1: edge origin
:param id_node2: edge destination
"""
return self.__nxgraph[id_node1][id_node2][Graph.OBJ_STR]
def add_node(self, k):
"""Adds a new node to the graph.
:param k: cardinality of the node (k==1 for Gaussian and k>1 for categorical random variables)
"""
id_node = self.__id_node_next
self.__id_node_next += 1
node_ = self.__node_factory.create_node(k)
self.__nxgraph.add_node(id_node, {Graph.OBJ_STR: node_})
# side effects
self.__add_root(id_node) # initially, every node is a root
self._print("add id_node={}".format(id_node))
return id_node
def add_nodes(self, K):
"""Create nodes defined by the types vector k
:param K: iterable that contains k values for all nodes to create
"""
self._print("adding {} nodes".format(len(K)))
id_nodes = [self.add_node(k) for k in K]
return id_nodes
def remove_node(self, id_node):
self._print("remove id_node={}".format(id_node))
assert self.has_node(id_node)
# side effects
self.__remove_root(id_node)
g = self.__nxgraph
degree = g.degree(id_node)
if degree > 0:
raise ValueError(
'id_node={} has edges connected to it. Remove all edges first before removing the node!'.format(
id_node))
g.remove_node(id_node)
def add_edge(self, id_node1, id_node2):
"""Adds a new edge to the graph.
:param id_node1: edge origin
:param id_node2: edge destination
"""
self._print("add edge=({},{})".format(id_node1, id_node2))
# check the structure
self.__check_potential_edge(id_node1, id_node2)
# side effects
self.__remove_root(id_node2) # not root anymore
node1 = self.node(id_node1)
node2 = self.node(id_node2)
edge_ = self.__edge_factory.create_edge(node1.k, node2.k)
self.__nxgraph.add_edge(id_node1, id_node2, {Graph.OBJ_STR: edge_})
# q needs to be recomputed
node1.q_dirty = True
node2.q_dirty = True
# layer of node1 might change
# node1.layer = max(node1.layer, node2.layer+1)
self.__update_layer_recursive(id_node2, node2)
def __update_layer_recursive(self, id_node, node):
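        # node.layer stores the height of a node within its tree: 0 for leaves, otherwise 1 + the maximum
        # layer of its children; adding an edge can therefore only increase the layers of the ancestors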
id_parent = self.get_parent(id_node)
if id_parent is not None:
parent = self.node(id_parent)
parent.layer = max(parent.layer, node.layer+1)
self.__update_layer_recursive(id_parent, parent)
def remove_edge(self, id_node1, id_node2):
"""Removes an edge from the graph.
:param id_node1: edge origin
:param id_node2: edge destination
"""
self._print("remove edge=({},{})".format(id_node1, id_node2))
assert self.has_edge(id_node1, id_node2)
self.__nxgraph.remove_edge(id_node1, id_node2)
# side effects
# by the structure restriction, id_node2 has only one parent and thus becomes a root
self.__add_root(id_node2)
# q needs to be recomputed
self.node(id_node1).q_dirty = True
self.node(id_node2).q_dirty = True
# layer of node1 might change
self.__recalculate_layer(id_node1)
def __recalculate_layer(self, id_node):
layer_options = [self.node(id_child).layer + 1 for id_child in self.out_edges_iter(id_node)] + [0]
layer_new = max(layer_options)
if self.node(id_node).layer != layer_new:
self.node(id_node).layer = layer_new
# we need to recursively check the next parent
id_parent = self.get_parent(id_node)
if id_parent is not None:
self.__recalculate_layer(id_parent)
def __check_potential_edge(self, id_node1, id_node2):
"""This functions ensures that each node has at most one parent, when adding a potential edge.
(Note: this function does not prevent the graph from having cycles!)
:param id_node1: edge origin
:param id_node2: edge destination
"""
# first check that both nodes exist
assert self.has_node(id_node1)
assert self.has_node(id_node2)
# then assert the id_node2 does not have a parent yet
in_degree_node2 = self.__nxgraph.in_degree(id_node2)
assert in_degree_node2 == 0
def __add_root(self, id_root):
self.__id_roots.add(id_root)
# add prior again
node = self.node(id_root)
if node.prior is None:
node.create_new_prior()
def __remove_root(self, id_root):
self.__id_roots.discard(id_root)
# remove prior
node = self.node(id_root)
node.prior = None
def is_root(self, id_node):
return id_node in self.__id_roots
def draw(self, id_highlight=None):
fig, ax = plt.subplots(1,self.required_axes)
if not isinstance(ax, collections.Iterable):
ax = [ax]
self.draw_axes(ax, id_highlight)
fig.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.show()
def draw_axes(self, ax=None, id_highlight=None):
# plt.ion() # interactive mode on (doesn't work with pycharm)
g = self.__nxgraph
# new call format for networkx 1.11 (uses pygraphviz, which does not support Python 3!)
# pos = nx.drawing.nx_agraph.graphviz_layout(g, prog='dot')
# old call format for networkx 1.10
# pos = nx.graphviz_layout(g, prog='dot')
# new call format for networkx 1.11 (uses pydot)
pos = nx.drawing.nx_pydot.graphviz_layout(g, prog='dot')
if ax is None:
ax = self.get_axes(self.id_axes)
assert len(ax) == self.required_axes
nx.draw(g, pos, ax=ax[0], hold=True, with_labels=True, arrows=True)
# nx.draw_networkx(self.__graph)
# plot the roots
if id_highlight is None:
id_highlight = self.__id_roots
for r in id_highlight:
ax[0].scatter(pos[r][0], pos[r][1], s=500)
for id_node, node in self.nodes_iter(data=True):
c = "{}".format(node.layer)
ax[0].text(pos[id_node][0], pos[id_node][1], "\n" + c, va='top', ha='center', color='blue')
# plt.show() # this call blocks further execution until window is closed
def print_distrib(self):
pass
def nodes(self, data=False):
nx = self.__nxgraph.nodes(data=data)
if data:
n = map(lambda x: (x[0], x[1][self.OBJ_STR]), nx)
else:
n = nx
return n
def nodes_iter(self, data=False):
nx_iter = self.__nxgraph.nodes_iter(data=data)
"""nx_iter returns tuples where the first element is the id_node and the second element is a dictionary
of the node data"""
if data:
iter_ = map(lambda x: (x[0], x[1][self.OBJ_STR]), nx_iter)
else:
iter_ = nx_iter
return iter_
def edges_iter(self, nbunch=None, data=False):
nx_iter_out = self.__nxgraph.out_edges_iter(nbunch=nbunch, data=data)
nx_iter_in = self.__nxgraph.in_edges_iter(nbunch=nbunch, data=data)
nx_iter = chain(nx_iter_out, nx_iter_in)
"""nx_iter returns triples where the first two elements are id_node1 and id_node2 and the third element is a
dictionary of the edge data"""
if data:
iter_ = map(lambda x: (x[0], x[1], x[2][self.OBJ_STR]), nx_iter)
else:
iter_ = nx_iter
return iter_
def edges_iter_except(self, id_node, id_except, data=False):
iter_ = self.edges_iter(nbunch=id_node, data=data)
if id_except is not None:
iter_ = filter(lambda x: (x[0] != id_except) and (x[1] != id_except), iter_)
return iter_
def out_edges_iter(self, id_node, data=False):
nx_iter = self.__nxgraph.out_edges_iter(nbunch=(id_node,), data=data)
if data:
iter_ = map(lambda x: (x[1], x[2][self.OBJ_STR]), nx_iter)
else:
iter_ = map(lambda x: x[1], nx_iter)
return iter_
def in_edges_iter(self, id_node, data=False):
nx_iter = self.__nxgraph.in_edges_iter(nbunch=(id_node,), data=data)
if data:
iter_ = map(lambda x: (x[0], x[2][self.OBJ_STR]), nx_iter)
else:
iter_ = map(lambda x: x[0], nx_iter)
return iter_
def register_properties(self, prop_graph, prop_node, prop_edge):
self.clear(prop_graph)
self.__node_factory.register_properties(prop_node)
self.__edge_factory.register_properties(prop_edge)
def clear_properties(self, prop_graph, prop_node, prop_edge):
"""Clear properties from nodes and edges.
:param prop_graph: set of strings with graph properties
:param prop_node: set of strings with node properties
:param prop_edge: set of strings with edge properties
"""
self.clear(prop_graph)
for node_id, node in self.nodes_iter(data=True):
node.clear(prop_node)
for node_id1, node_id2, edge in self.edges_iter(data=True):
edge.clear(prop_edge)
def properties_func(self, id_root, func, recursive=True, prop_node=None, prop_edge=None):
if prop_node is not None:
func_node = lambda id, node: func(node, prop_node)
else:
func_node = None
if prop_edge is not None:
func_edge = lambda id1, id2, edge: func(edge, prop_edge)
else:
func_edge = None
self.func_iter(id_root, recursive=recursive, func_node=func_node, func_edge=func_edge)
def func_iter(self, id_roots=None, recursive=True, func_node=None, func_edge=None):
g = self
if id_roots is None:
id_roots = g.get_id_roots()
elif not isinstance(id_roots, collections.Iterable):
id_roots = (id_roots, )
for id_root in id_roots:
self.__func_iter(id_root, recursive=recursive, id_except=None, func_node=func_node, func_edge=func_edge)
def __func_iter(self, id_node, recursive=True, id_except=None, func_node=None, func_edge=None):
if func_node is not None:
node = self.node(id_node)
func_node(id_node, node)
edges = self.edges_iter_except(id_node, id_except, data=True)
for id_node1, id_node2, edge in edges:
if func_edge is not None:
func_edge(id_node1, id_node2, edge)
if recursive:
_, id_next = is_message_1to2(id_node1, id_node2, id_node)
self.__func_iter(id_next, recursive=recursive, id_except=id_node, func_node=func_node, func_edge=func_edge)
def get_adjlist(self):
        # the returned adjacency list is guaranteed to be ordered such that all targets (children) in each
        # line have already been defined in an earlier line.
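        # e.g. for a tree 2 -> {0, 1} the result is [[0], [1], [2, 0, 1]]: each line is [id, children...],
        # ids are renumbered consecutively and lines are sorted by layer, so children always appear in an
        # earlier line than their parent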
#gen = nx.generate_adjlist(self.nxgraph)
dict_of_lists = nx.to_dict_of_lists(self.__nxgraph)
# sort according id1 (the second sort below is stable, thus this will be the secondary order)
# sorting key is the first item, i.e. id1
list_of_tuples = sorted(dict_of_lists.items(), key=lambda x: x[0])
adjlist = []
layer = []
for id1, ids2 in list_of_tuples:
lay = self.node(id1).layer
adjlist.append([id1] + ids2)
layer.append(lay)
        # sort according to layer
# sorting key is the first item, i.e. the layer
adjlist_reordered = [i[0] for i in sorted(zip(adjlist, layer), key=lambda x:x[1])]
# rename the ids to id new, which use consecutive numbers starting from 0
id_nodes = [line[0] for line in adjlist_reordered]
id2idn = {} # idn: id new
for idn, id in enumerate(id_nodes):
id2idn[id] = idn
# rename all ids from id to idn and sort them
adjlist_renamed = []
for line in adjlist_reordered:
line_renamed = [id2idn[id] for id in line]
line_renamed = [line_renamed[0]] + sorted(line_renamed[1:])
adjlist_renamed.append(line_renamed)
return adjlist_renamed
class GraphManipulator(ObjectRoot):
"""Interface class for all graph manipulators (data, inference, parameter_update and structure_update)"""
def __init__(self, graph):
super(GraphManipulator, self).__init__()
assert isinstance(graph, Graph)
self._graph = graph
self._prop_graph = set()
self._prop_node = set()
self._prop_edge = set()
self.required_axes = 0
self.id_axes = None
def _register_properties(self):
g = self._graph
g.register_properties(self._prop_graph, self._prop_node, self._prop_edge)
if self.required_axes > 0:
self.id_axes = g.register_axes(self.required_axes)
def clear_properties(self):
self._graph.clear_properties(self._prop_graph, self._prop_node, self._prop_edge)
def run(self, id_roots=None, recursive=True):
g = self._graph
if id_roots is None:
id_roots = g.get_id_roots()
elif not isinstance(id_roots, collections.Iterable):
id_roots = (id_roots, )
# self._print('id_roots={}'.format(id_roots))
lklhd = 0
for id_root in id_roots:
lklhd_root = self._run(id_root, recursive=recursive) # call the run method of the child class
if lklhd_root is not None:
lklhd += lklhd_root
return lklhd
def draw(self):
fig, ax = plt.subplots(1,self.required_axes)
self.draw_axes(ax)
plt.show()
class Data(GraphManipulator):
"""This class handles the input of observed data and the output of inferred data."""
def __init__(self, graph):
super(Data, self).__init__(graph)
# x contains the observed data for the corresponding node and None otherwise.
self._prop_node = {'x'}
self._prop_graph = {'N'}
self._register_properties()
self.gauss_init_std = 1
self.gauss_init_std_rand = 0
def __insert_samples(self, id_node, samples):
"""Insert data x for specific node.
:param id_node: 1x1 node identifier
:param samples: Nx1 numpy array
"""
g = self._graph
node = g.node(id_node)
assert isinstance(samples, np.ndarray)
# check is samples are a cat. distribution or values
if is_obj_array(samples):
# object array with shape (1,)
samples = samples[0]
N = samples.shape[0]
if samples.ndim == 1:
# samples are values
distrib = DistribFactory.from_samples(samples, node.k)
elif node.k >= 2 and node.k == samples.shape[1]:
                # samples describe categorical distributions
distrib = DistribFactory.empty(N, node.k)
distrib.init_data(samples)
else:
raise ValueError("samples have wrong shape={}, for k={}".format(samples.shape, node.k))
else:
N = samples.shape[0]
distrib = DistribFactory.from_samples(samples, node.k)
assert isequal_or_none(N, g.N)
g.N = N
node.x = distrib
def insert_samples(self, id_nodes, samples):
"""Insert data x for multiple nodes.
:param id_nodes: Mx1 node identifier
:param samples: NxM numpy array
"""
self._print("inserting N={} samples into M={} nodes".format(obj_array_get_N(samples), len(id_nodes)))
for i, id_node in enumerate(id_nodes):
self.__insert_samples(id_node, samples[:, i])
def __distrib_init(self, id_node):
"""Initialize prior of id_node and outgoing edges by using observed data x
:param id_node: node ID
"""
g = self._graph
node = g.node(id_node)
# initialize id_node prior, if id_node has prior
if g.is_root(id_node):
if node.x is not None:
node.prior.init_distrib_idx(node.x, idx=None)
else:
node.prior.init_random()
# initialize all outgoing edges (id_node, id_child_n)
num_edges = g.out_degree(id_node)
edges = g.out_edges_iter(id_node, data=True)
idx = np.random.choice(g.N, size=(node.k,), replace=False)
for id_child, edge in edges:
child = g.node(id_child)
has_data = child.x is not None
# only use random idx of datapoints for Gaussians
            # (Cat datapoints are usually distributions collapsed to a single state)
is_gaussian = child.k == 1
if has_data and is_gaussian:
if num_edges > 1:
edge.distrib.init_distrib_idx(child.x, idx)
else:
edge.distrib.init_distrib_equidistant_rand(child.x, self.gauss_init_std, self.gauss_init_std_rand)
else:
edge.distrib.init_random()
child.q_dirty = True
node.q_dirty = True
# def _run(self, id_root, recursive, id_except=None):
# self.__distrib_init(id_root)
#
# if recursive:
# edges = self._graph.edges_iter_except(id_root, id_except)
# for id_node1, id_node2 in edges:
# _, id_next = is_message_1to2(id_node1, id_node2, id_root)
# self._run(id_next, recursive, id_except=id_root)
def _run(self, id_root, recursive, id_except=None):
func_node = lambda id, node: self.__distrib_init(id)
self._graph.func_iter(id_root, recursive, func_node)
class BeliefPropagation(GraphManipulator):
"""This class handles the belief propagation inference algorithm."""
def __init__(self, graph):
super(BeliefPropagation, self).__init__(graph)
# lklhd: N x 1 (conditional) log-likelihoods of the datapoints
# q: N x k posterior distribution of the node
# q_dirty: Boolean flag that determines if q needs to be recalculated
# message_1to2 (alpha): message id_node1 -> id_node2 (from parent to child)
# message_2to1 (beta): message id_node1 <- id_node2 (from child to parent)
self._prop_node = {'lklhd', 'q', 'q_dirty'}
self._prop_edge = {'message_1to2', 'message_2to1'}
# requires prop_graph: N
# requires prop_node: x, prior
# requires prop_edge: distrib
self._register_properties()
self.extract_samples_mode = 'max'
@profile
def _run(self, id_root, recursive):
if recursive:
rec = sys.maxsize
else:
rec = 1
self.__inward_pass(id_center=id_root, id_in=None, recursive=rec)
self.__outward_pass(id_center=id_root, id_in=None, recursive=rec)
lklhd = self.get_lklhd(id_root)
return lklhd
@profile
def get_message(self, id_src, id_dest):
g = self._graph
if g.has_edge(id_src, id_dest):
return g.edge(id_src, id_dest).message_1to2
else:
assert g.has_edge(id_dest, id_src)
return g.edge(id_dest, id_src).message_2to1
@classmethod
@profile
def __get_message_from_edge(cls, id_node1, id_node2, edge, id_dest):
is_message_1to2_, id_src = is_message_1to2(id_node1, id_node2, id_dest)
if is_message_1to2_:
return edge.message_1to2
else:
return edge.message_2to1
@profile
def __set_message(self, id_dest, id_src, messages_prod):
# messages_prod : N x k_node
# message: N x k_origin
g = self._graph
if g.has_edge(id_src, id_dest):
# update alpha message
# edge.distrib: k_node x k_origin
# for alpha message, we need to renormalize first
# since each distrib is normalized, we only need to set the log_constant to zero
edge = g.edge(id_src, id_dest)
messages_prod.set_log_const_zero()
edge.message_1to2 = messages_prod.dot(edge.distrib)
else:
# update beta message
# edge.distrib: k_origin x k_node
edge = g.edge(id_dest, id_src)
edge.message_2to1 = messages_prod.dot_transpose(edge.distrib)
node = g.node(id_dest)
node.q_dirty = True # incoming messages have changed, thus q has changed
@profile
def get_messages_prod(self, id_node, id_except, include_prior=True):
g = self._graph
node = g.node(id_node)
messages = []
if node.x is not None:
messages.append(node.x)
if (node.prior is not None) and include_prior:
messages.append(node.prior)
edges = g.edges_iter_except(id_node, id_except, data=True)
for id_node1, id_node2, edge in edges:
message = self.__get_message_from_edge(id_node1, id_node2, edge, id_node)
messages.append(message)
if (len(messages) == 1) and (messages[0].get_k1() == g.N):
# this is just for speedup, the later 'else' statement would also work
messages_prod = messages[0].copy()
else:
messages_prod = DistribFactory.uniform(g.N, node.k)
messages_prod.prod(messages)
return messages_prod
@profile
def __inward_pass(self, id_center, id_in=None, recursive=sys.maxsize): # beta_ci pass
# sets message that runs towards id_in from id_center
# to start with a root
# call __inward_pass(id_root, recursive=1) for non-recursive and
# call __inward_pass(id_root, recursive=sys.maxsize) for recursive calls
g = self._graph
if recursive > 0:
edges_oc = g.edges_iter_except(id_center, id_in, data=False)
for id_node1, id_node2 in edges_oc:
_, id_out = is_message_1to2(id_node1, id_node2, id_center)
self.__inward_pass(id_center=id_out, id_in=id_center, recursive=recursive-1)
if id_in is not None:
# only if there is a destination for the message
messages_oc_prod = self.get_messages_prod(id_center, id_in)
self.__set_message(id_in, id_center, messages_oc_prod)
        # Maybe: save the product of all incoming messages here, in order to be able to later calculate q()
        # and the conditional likelihood C()
@profile
def __outward_pass(self, id_center, id_in=None, recursive=sys.maxsize):
# sets message that run towards id_center from id_in
g = self._graph
if id_in is not None:
# only if there is a source of the message (otherwise it is the root)
# in the outward pass, id_in and id_center roles are switched (in comparison to the inward pass)
messages_prod = self.get_messages_prod(id_in, id_center)
self.__set_message(id_center, id_in, messages_prod)
if recursive > 0:
edges_oc = g.edges_iter_except(id_center, id_in, data=False)
for id_node1, id_node2 in edges_oc:
_, id_out = is_message_1to2(id_node1, id_node2, id_center)
self.__outward_pass(id_center=id_out, id_in=id_center, recursive=recursive-1)
@profile
def get_lklhd(self, id_node):
node = self._graph.node(id_node)
self.__calc_q_single(id_node)
lklhd = np.mean(node.lklhd) # normalized log-likelihood; the mean divides implicitly by N
return lklhd
@profile
def get_q(self, id_node):
node = self._graph.node(id_node)
self.__calc_q_single(id_node)
return node.q
@profile
def get_lklhd_all(self):
g = self._graph
id_roots = g.get_id_roots()
lklhd = 0
for id_root in id_roots:
lklhd += self.get_lklhd(id_root)
return lklhd
@profile
def __calc_q_single(self, id_node):
g = self._graph
node = g.node(id_node)
if (node.q_dirty is not None) and (not node.q_dirty):
# lazy calculation of q
return
messages_prod = self.get_messages_prod(id_node, id_except=None)
if isinstance(messages_prod, DistribCat):
lklhd = messages_prod.get_log_const().copy()
# messages_prod.set_log_const_zero()
# id_parent = g.get_parent(id_node)
# if id_parent is not None:
# # just to double check
# lklhd2 = self.__calc_lklhd_parent(id_node, id_parent)
# assert np.allclose(lklhd, lklhd2)
else:
id_parent = g.get_parent(id_node)
assert id_parent is not None # each Gaussian node must have exactly one parent
# this calculation is only possible if the Gaussian node has exactly one parent.
lklhd = self.calc_lklhd_parent(id_node, id_parent)
q = messages_prod
node.q = q
node.q_dirty = False # q has just been updated
node.lklhd = lklhd
@profile
def calc_lklhd_parent(self, id_node, id_parent, message_child2parent_pot=None):
messages_prod = self.get_messages_prod(id_parent, id_except=id_node)
if message_child2parent_pot is not None:
messages_prod.prod([message_child2parent_pot])
message = self.get_message(id_node, id_parent)
lklhd = calc_lklhd_parent_messages(messages_prod, message)
return lklhd
@profile
def __extract_samples(self, id_node):
q = self.get_q(id_node)
samples = q.extract_samples(mode=self.extract_samples_mode)
return samples
@profile
def extract_samples(self, id_nodes):
N = self._graph.N
M = len(id_nodes)
samples = np.zeros((N, M))
for i, id_node in enumerate(id_nodes):
samples[:,i:(i+1)] = self.__extract_samples(id_node)
return samples
@profile
def visual_gauss_edge(self, id_node1, id_node2):
g = self._graph
xs, ds = g.node(id_node2).x.visual_get_kde()
mu = g.node(id_node2).x.get_mu()
dens = g.edge(id_node1,id_node2).distrib.visual_get_density(xs)
plt.plot(xs,np.concatenate((ds[:, np.newaxis], dens), axis=1))
plt.scatter(mu, np.zeros_like(mu))
plt.show()
class Distrib(object):
def __init__(self, k1, k2):
self._k1 = k1
self._k2 = k2
def get_k1(self):
return self._k1
def get_k2(self):
return self._k2
# dot(Cat, Cat)
# dot(Cat, Gauss)
# dot(Gauss, Gauss)
# dot(Gauss, Cat)
# transpose(Cat)
class DistribCat(Distrib):
def __init__(self, k1, k2):
assert k2 > 1
super(DistribCat, self).__init__(k1, k2)
# values = values_norm * exp( log_const )
self.__values_norm = None # k1 x k2
self.__log_const = None # k1 x 1
def get_dof(self):
dof = self._k1 * (self._k2 - 1)
return dof
@profile
def copy(self):
distrib = DistribFactory.empty(self._k1, self._k2)
distrib.init_data_raw(self.__values_norm.copy(), self.__log_const.copy())
return distrib
def get_values(self):
return self.__values_norm * np.exp(self.__log_const)
def get_values_norm(self):
return self.__values_norm
def get_log_const(self):
return self.__log_const
def set_log_const_zero(self):
self.__log_const.fill(0)
@profile
def prod(self, distribs):
for distrib in distribs:
self.__values_norm *= distrib.get_values_norm()
self.__log_const += distrib.get_log_const()
self.__normalize_convex()
@profile
def dot(self, distrib):
# self: N x k1 (Cat)
# distrib: k1 x k2 (Cat / Gauss)
# out: N x k2 (Cat / Gauss)
assert (self.get_log_const() == 0).all() # alpha message should have been normalized
values_norm = self.__values_norm # N x k1
if isinstance(distrib, DistribCat):
assert (distrib.get_log_const() == 0).all()
values = values_norm.dot(distrib.get_values_norm())
result = DistribCat(values.shape[0], values.shape[1])
result.init_data_unnormalized(values)
else:
assert isinstance(distrib, DistribGauss)
mu = distrib.get_mu() # k1 x 1
std = distrib.get_std() # k1 x 1
# means are calculated by the same dot product as Cat
data_m = values_norm.dot(mu) # N x 1
# variance within components
var_within = values_norm.dot(np.square(std))
# variance between components
var_between = np.sum(values_norm * np.square(data_m - mu.reshape((1, -1))), axis=1, keepdims=True)
data_s = np.sqrt(var_within + var_between)
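            # law of total variance: Var[x] = E[Var[x|k]] + Var[E[x|k]], i.e. the mixture over the k1 Gaussian
            # components is moment-matched by a single Gaussian per row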
result = DistribGauss(values_norm.shape[0])
result.init_data(data_m, data_s)
return result
@profile
def dot_transpose(self, distrib):
# self: N x k2
# distrib: k1 x k2
# out = self * distrib^T: N x k1
assert isinstance(distrib, DistribCat) # should never be called for DistribGauss
assert (distrib.get_log_const() == 0).all()
values = self.__values_norm.dot(distrib.get_values_norm().transpose())
result = DistribCat(values.shape[0], values.shape[1])
result.init_data_unnormalized(values, self.__log_const)
return result
@profile
def init_samples(self, samples, k):
assert k > 1
values_norm = self.__samples2distrib(samples, k)
self.init_data(values_norm)
@profile
def extract_samples(self, mode):
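        # 'max' returns the index of the most probable state (MAP estimate),
        # 'exp' returns the expected state value sum_k k * p_k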
if mode == 'max':
samples = self.__distrib2samples_max(self.__values_norm)
elif mode == 'exp':
samples = self.__distrib2samples_exp(self.__values_norm)
else:
assert False
return samples
@profile
def init_uniform(self):
self.__values_norm = np.empty((self._k1, self._k2))
self.__values_norm.fill(1/self._k2)
self.__log_const = np.empty((self._k1, 1))
        # 'uniform' in this case means a distribution of all ones!
# this ensures that the likelihood corresponds to an unobserved message.
self.__log_const.fill(np.log(self._k2))
@profile
def init_random(self):
values_norm = np.ones((self._k1, self._k2)) / self._k2
self.__add_random(values_norm, 0.2)
self.init_data(values_norm)
@profile
def init_data_unnormalized(self, values_norm, log_const=None):
if log_const is None:
log_const = np.zeros((values_norm.shape[0], 1))
log_const += np.log(normalize_convex(values_norm, axis=1))
self.init_data_raw(values_norm, log_const)
@profile
def init_data_raw(self, values_norm, log_const):
self.__values_norm = values_norm
self.__log_const = log_const
@profile
def init_data(self, values_norm, log_const=None):
assert has_equiv_shape(values_norm, (self._k1, self._k2))
assert np.allclose(np.sum(values_norm, axis=1), 1)
if log_const is not None:
assert has_equiv_shape(log_const, (self._k1, 1))
else:
log_const = np.zeros((self._k1, 1))
self.__values_norm = values_norm
self.__log_const = log_const
@profile
def init_distrib_idx(self, distrib, idx=None):
assert isinstance(distrib, DistribCat)
x = distrib.get_values_norm()
if idx is None:
# initialize prior and thus average over all cases
assert self._k1 == 1
values_norm = np.mean(x, axis=0, keepdims=True)
else:
# select cases idx
x_idx = x[idx,:]
assert self._k1 == x_idx.shape[0]
values_norm = np.copy(x_idx)
self.__add_random(values_norm, 0.1)
self.init_data(values_norm)
@profile
def update_prior(self, distrib, N_0):
# self: 1 x k2
# distrib: N x k2
M2 = distrib.get_values_norm()
M2_t = M2[:,:,np.newaxis].transpose((2, 1, 0)) # 1 x k2 x N
values_norm = self.__normalize_prior(M2_t, N_0)
self.init_data(values_norm)
@profile
def update_cpd(self, distrib1, distrib2, N_0, exclude_self=False):
# self=x: k1 x k2
# M1: N x k1
# M2: N x k2
M1 = distrib1.get_values_norm()
M2 = distrib2.get_values_norm()
M1_t = M1[:,:,np.newaxis].transpose((1, 2, 0)) # k1 x 1 x N
M2_t = M2[:,:,np.newaxis].transpose((2, 1, 0)) # 1 x k2 x N
if not exclude_self:
x = self.get_values_norm()
x_t = x[:,:,np.newaxis] # k1 x k2 x 1
q = M1_t * M2_t * x_t # k1 x k2 x N
else:
q = M1_t * M2_t # k1 x k2 x N
q /= np.sum(q, axis=(0, 1), keepdims=True)
values_norm = self.__normalize_prior(q, N_0)
assert np.all(np.isfinite(values_norm))
self.init_data(values_norm)
def as_str(self, num=None):
values_norm = cut_max(self.__values_norm, num)
log_const = cut_max(self.__log_const, num)
result = 'values_norm=\n{}'.format(values_norm)
result +='\nlog_const={}\n'.format(log_const.ravel())
return result
def plot(self):
fig, ax = plt.subplots()
cax = ax.imshow(self.__values_norm, interpolation='nearest', aspect='auto', vmin=0, vmax=1)
fig.colorbar(cax)
@profile
def __normalize_convex(self):
self.__log_const += np.log(normalize_convex(self.__values_norm, axis=1))
@staticmethod
@profile
def __normalize_prior(q, N_0):
# see Murphy (2012) Machine Learning - A Probabilistic Perspective, p.80 Eq. (3.47)
# uniform alpha: alpha_k = alpha -> alpha_0 = K
# N_0 = K(a-1) (pseudo counts)
# q: k1 x k2 x N
K = q.shape[1]
k1 = q.shape[0]
values_biased = np.sum(q, axis=2, keepdims=True).squeeze(axis=(2,)) # k1 x k2
N = np.sum(values_biased, axis=1, keepdims=True) # k1 x 1
values_uniform = np.ones((1, K)) / K
values = values_biased * (1 / (N + N_0))
values += values_uniform * (N_0 / (N + N_0))
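        # i.e. p(k2|k1) = (sum_n q[k1,k2,n] + N_0/K) / (N[k1] + N_0): the N_0 pseudo-counts are spread
        # uniformly over the K states, which acts like a symmetric Dirichlet smoothing of the counts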
# # N_0 = K (alpha-1) -> alpha = (N_0 / K) + 1
# # A pseudo-count of N_0 corresponds to a Dirichlet prior with alpha = (N_0 / K) + 1
# alpha = (N_0 / K) + 1
# alpha_all = np.ones((K,))*alpha
# lklhd_prior = dirichlet.logpdf(values.T, alpha_all).reshape(k1, 1)
# lklhd_mode = dirichlet.logpdf(values_uniform.T, alpha_all)
# lklhd_prior -= lklhd_mode
return values
@staticmethod
@profile
def __samples2distrib(samples, k):
N = samples.shape[0]
isnan = np.isnan(samples)
notnan = np.logical_not(isnan)
x_real = samples[notnan]
assert np.all(np.equal(np.mod(x_real, 1), 0)) # all values must be integer
assert np.all((x_real >= 0) & (x_real < k)) # all values must be within (0, k-1)
values_norm = np.zeros((N, k))
ind1 = np.arange(N)[notnan]
ind2 = x_real.astype(int)
values_norm[ind1, ind2] = 1.
values_norm[isnan, :] = 1. / k
return values_norm
@staticmethod
@profile
def __distrib2samples_max(values_norm):
idx = np.argmax(values_norm, axis=1)
samples = idx[:, np.newaxis]
return samples
@staticmethod
@profile
def __distrib2samples_exp(values_norm):
k = values_norm.shape[1]
zero2k = np.arange(k)[:, np.newaxis] # k x 1
samples = values_norm.dot(zero2k) # N x 1
return samples
@staticmethod
@profile
def __add_random(dist, perc):
k1 = dist.shape[0]
k2 = dist.shape[1]
dist_rand = np.random.rand(k1, k2)
normalize_convex(dist_rand, axis=1)
dist *= (1-perc)
dist += perc * dist_rand
class DistribGauss(Distrib):
def __init__(self, k1):
super(DistribGauss, self).__init__(k1, 1)
self.__mu = None # k1 x 1 mean
self.__std = None # k1 x 1 standard deviation
def get_dof(self):
dof = self._k1 * 2
return dof
def copy(self):
distrib = DistribFactory.empty(self._k1, self._k2)
distrib.init_data(self.__mu.copy(), self.__std.copy())
return distrib
def get_mu(self):
return self.__mu
def get_std(self):
return self.__std
@profile
def __prod_single(self, distrib):
mu1 = self.__mu
std1 = self.__std
mu2 = distrib.get_mu()
std2 = distrib.get_std()
assert has_equiv_shape(mu1, mu2.shape)
assert mu1.shape[0] == 1 or mu2.shape[0] == 1 or mu1.shape[0] == mu2.shape[0]
assert mu1.shape[1] == 1 or mu2.shape[1] == 1 or mu1.shape[1] == mu2.shape[1]
if mu1.shape[0] == 1 and mu2.shape[0] > 1:
mu1 = np.tile(mu1, (mu2.shape[0],1))
std1 = np.tile(std1, (mu2.shape[0],1))
        # currently, I have not implemented the true product of two Gaussian distributions, only the special
        # cases where either std=Inf or std=0
# possible cases:
# std1=Inf -> mu=mu2, std=std2
# std1=0 -> mu=mu1, std=std1, assert std2!=0
# 0<std1<Inf -> std2=Inf -> mu=mu1, std=std1
# std2=0 -> mu=mu2, std=std2
# assert std==Inf or 0
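        # for reference, the full product N(mu1, std1^2) * N(mu2, std2^2) would be proportional to
        # N(mu, std^2) with 1/std^2 = 1/std1^2 + 1/std2^2 and mu = std^2 * (mu1/std1^2 + mu2/std2^2);
        # the cases handled below are its limits std=Inf (uninformative) and std=0 (observed value)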
is_inf1 = np.isinf(std1).ravel()
is_zero1 = (std1 == 0).ravel()
is_real1 = ~(is_inf1 | is_zero1)
is_inf2 = np.isinf(std2).ravel()
is_zero2 = (std2 == 0).ravel()
is_real2 = ~(is_inf2 | is_zero2)
assert np.all(~(is_zero1 & is_zero2))
assert np.all(~(is_real1 & is_real2))
# all right hand sides use fancy indexing, thus we do not need to explicitly copy the slices.
if mu1.shape[0] > 1 and mu2.shape[0] == 1:
mu1[is_inf1,:] = mu2[0,:]
std1[is_inf1,:] = std2[0,:]
if is_zero2:
mu1[:,:] = mu2[0,:]
std1[:,:] = std2[0,:]
else:
mu1[is_inf1,:] = mu2[is_inf1,:]
std1[is_inf1,:] = std2[is_inf1,:]
mu1[is_zero2,:] = mu2[is_zero2,:]
std1[is_zero2,:] = std2[is_zero2,:]
        # we need the explicit assignment, since the fancy indexing above created copies
self.__mu = mu1
self.__std = std1
@profile
def prod(self, distribs):
for distrib in distribs:
self.__prod_single(distrib)
def dot(self, distrib):
# self: N x 1 (Gauss)
# distrib: 1 x k2 (Gauss)
# out: N x k2 (Cat)
# should never be called since we never calculate an outgoing alpha from a Gaussian node
assert False
@profile
def dot_transpose(self, distrib):
# self: N x 1 (Gauss)
# distrib: k1 x 1 (Gauss)
# out: N x k1 (Cat)
assert isinstance(distrib, DistribGauss)
k1 = distrib.get_k1()
mu = distrib.get_mu().reshape((1, -1)) # 1 x k1
std = distrib.get_std().reshape((1, -1)) # 1 x k1
N = self.get_k1()
data_m = self.get_mu() # N x 1
data_s = self.get_std() # N x 1
values_log = np.empty((N, k1))
# handle unobserved data
isinf = np.isinf(data_s).ravel()
if np.any(isinf):
isnotinf = np.logical_not(isinf)
# scipy_d = norm(mu, std) # scipy normal distribution
# values_log[isnotinf, :] = scipy_d.logpdf(data_m[isnotinf, :])
# this is ~100 times faster than the scipy implementation
values_log[isnotinf, :] = norm_logpdf(data_m[isnotinf, :], mu, std)
values_log[isinf, :] = 0
else:
values_log = norm_logpdf(data_m, mu, std)
log_const = normalize_convex_log(values_log, axis=1)
distrib_res = DistribCat(N, k1)
distrib_res.init_data_raw(values_log, log_const)
return distrib_res
@profile
def init_samples(self, samples, k):
assert k == 1
mu, std = self.__samples2distrib(samples)
self.init_data(mu, std)
@profile
def extract_samples(self, mode):
samples = self.__mu
return samples
@profile
def init_uniform(self):
self.__mu = np.zeros((self._k1, 1))
self.__std = np.empty((self._k1, 1))
self.__std.fill(np.inf)
@profile
def init_random(self):
mu = np.random.randn(self._k1, 1) * 0.05
std = (np.abs(np.random.randn(self._k1, 1)) * 0.05) + 1
self.init_data(mu, std)
@profile
def init_data_raw(self, mu, std):
self.__mu = mu
self.__std = std
@profile
def init_data(self, mu, std=None):
assert has_equiv_shape(mu, (self._k1, 1))
if std is not None:
assert has_equiv_shape(std, (self._k1, 1))
self.__mu = mu
self.__std = std
@profile
def init_distrib_idx(self, distrib, idx=None):
assert isinstance(distrib, DistribGauss)
x = distrib.get_mu()
if idx is None:
# initialize prior and thus average over all cases
mu = np.nanmean(x, axis=0, keepdims=True)
else:
# select cases idx
mu = x[idx, :]
idx_nan = np.isnan(mu)
if np.any(idx_nan):
# we need to randomly select new values for all NaNs
idx_good = np.ones_like(idx, dtype=bool)
idx_good[idx, :] = False
idx_good[np.isnan(x)] = False
x_good = x[idx_good, :]
num_nan = np.count_nonzero(idx_nan)
mu[idx_nan] = np.random.choice(x_good, num_nan, replace=False)
mu = np.copy(mu) # make sure to not overwrite data
std = np.empty_like(mu)
std.fill(np.asscalar(np.nanstd(x)))
self.init_data(mu, std)
@profile
def init_distrib_percentiles(self, distrib):
assert isinstance(distrib, DistribGauss)
x = distrib.get_mu()
k = self._k1
percentiles = 100 * (np.array(range(k)) + 0.5) / k
mu = np.nanpercentile(x, percentiles)[:,np.newaxis]
mu = np.copy(mu) # make sure to not overwrite data
std = np.empty_like(mu)
std.fill(np.asscalar(np.nanstd(x)))
self.init_data(mu, std)
@profile
def init_distrib_equidistant(self, distrib, std_rel=1):
assert isinstance(distrib, DistribGauss)
x = distrib.get_mu()
k = self._k1
percentiles = 1 * (np.array(range(k)) + 0.5) / k
min_x = np.min(x)
max_x = np.max(x)
range_x = max_x - min_x
mu = min_x + (percentiles * range_x)
mu = mu[:,np.newaxis]
std = np.empty_like(mu)
std.fill(np.asscalar(std_rel * np.nanstd(x) / k))
self.init_data(mu, std)
@profile
def init_distrib_equidistant_rand(self, distrib, std=1, std_rand=0.5):
self.init_distrib_equidistant(distrib, std)
x = distrib.get_mu()
k = self._k1
self.__mu += np.random.randn(k, 1) * (std_rand * np.nanstd(x) / k)
# @profile
# def update_prior(self, distrib, N_0):
# # self: 1 x k2=1
# # distrib: N x k2=1
# mu = distrib.get_mu()
# std = distrib.get_std()
# assert np.all(std == 0)
#
# self.init_data(np.mean(mu).reshape(1,1), np.std(mu).reshape(1,1))
@profile
def update_cpd(self, cat, gauss, N_0, exclude_self=False):
# self: k1 x 1 (mean + std)
# cat: N x k1
# gauss: N x 1 (mean + std)
if not exclude_self:
            # SPEEDUP possibility: this cat message has already been calculated during inference; could reuse that value
message_GaussToCat = gauss.dot_transpose(self)
cat.prod([message_GaussToCat])
q = cat.get_values_norm().T # k1 x N
data_m = gauss.get_mu().T # 1 x N
data_s = gauss.get_std().T # 1 x N
# ignore unobserved data
isnotinf = ~np.isinf(data_s).ravel()
if not np.all(isnotinf):
q = q[:,isnotinf]
data_m = data_m[:,isnotinf]
data_s = data_s[:,isnotinf]
N_k = np.sum(q, axis=1, keepdims=True) # k1 x 1
N_safe = N_k.copy()
N_safe[N_k == 0] = 1
mu = q.dot(data_m.T) / N_safe # k1 x 1
d_data_sq = (data_m - mu)**2 # k1 x N
var_biased = np.sum(q * d_data_sq, axis=1, keepdims=True) / (N_k + N_0)
var_all = np.var(data_m, keepdims=True)
var = var_biased + (var_all * N_0 / (N_k + N_0))
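        # i.e. var_k = (sum_n q_kn * (x_n - mu_k)^2 + N_0 * var_all) / (N_k + N_0): the N_0 pseudo-observations
        # pull each component variance towards the overall data variance, keeping it finite for empty components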
std = np.sqrt(var)
# # A pseudo-count of N_0 and prior variance var_all is equivalent of an inverse-gamma prior with
# alpha = N_0 / 2
# scale = var_all * ((N_0 / 2) + 1) # (=beta)
# lklhd_prior = invgamma.pdf(var, alpha, 0, scale)
# lklhd_mode = invgamma.pdf(var_all, alpha, 0, scale)
# lklhd_prior -= lklhd_mode
assert np.all(np.isfinite(mu))
assert np.all(np.isfinite(std))
assert np.all(std>0)
self.init_data(mu, std)
def as_str(self, num=None):
mu = cut_max(self.__mu, num)
std = cut_max(self.__std, num)
result = 'mu={}'.format(mu)
result += '\nstd={}\n'.format(std)
return result
def visual_get_kde(self):
mu = self.__mu.ravel()
density = gaussian_kde(mu)
xs = np.linspace(mu.min(),mu.max(),200)
density.covariance_factor = lambda : .25
density._compute_covariance()
return xs, density(xs)
def visual_get_density(self, xs):
xs_distrib = DistribFactory.from_samples(xs, 1) # DistribGauss
dens_distrib = xs_distrib.dot_transpose(self)
dens = dens_distrib.get_values()
return dens
@staticmethod
@profile
def __samples2distrib(samples):
# continuous case
assert np.issubsctype(samples, np.float)
N = samples.shape[0]
mu = samples.copy().reshape((N, 1))
std = np.zeros_like(mu)
# handle NaNs
isnan_mu = np.isnan(mu)
std[isnan_mu] = np.inf
mu[isnan_mu] = np.nan
return mu, std
class DistribFactory(object):
@staticmethod
def empty(k1, k2):
# k2 = 1 => DistribGauss
# k2 > 2 => DistribCat
# k1 = 1 => Prior or N=1
# k1 > 1 => CPD or N>1
if k2 == 1:
# DistribGauss
distrib = DistribGauss(k1)
else:
# DistribCat
distrib = DistribCat(k1, k2)
return distrib
@classmethod
def random(cls, k1, k2):
distrib = cls.empty(k1, k2)
distrib.init_random()
return distrib
@classmethod
def uniform(cls, k1, k2):
distrib = cls.empty(k1, k2)
distrib.init_uniform()
return distrib
@classmethod
def from_samples(cls, samples, k):
has_equiv_shape(samples, (None, 1))
N = samples.shape[0]
distrib = cls.empty(N, k)
distrib.init_samples(samples, k)
return distrib
"""
Init (id_parent, id_child) distribution:
- needs x (beta) of all children
- init prior of id_parent
- init edge of (id_parent, id_child)
Init Cat prior / CPD:
- random
Init Gauss prior:
- use data from actual node
Init Gauss CPD:
- use data from all children with common parent (only observed nodes!)
"""
class ParameterUpdate(GraphManipulator):
"""This class handles the parameter update step."""
def __init__(self, graph, inference):
super(ParameterUpdate, self).__init__(graph)
assert isinstance(inference, BeliefPropagation)
# requires prop_graph: N
# requires prop_node: x, prior
# requires prop_edge: message_1to2, message_2to1
# updates prop_edge: distrib
self._register_properties()
# requires get_messages_prod() from the inference algorithm
self._inference = inference
self.N_0 = 1
self.print_distrib = False
# pseudo count for categorical/variance prior
# (sensible range: 0 <= N_0 < Inf)
# N_0 = 0: no prior, fallback to ML solution (warning: might lead to infinite variances!)
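        # e.g. N_0 = 1 (the default above) adds one pseudo-observation spread uniformly over the states of each
        # categorical CPD row and shrinks each Gaussian component variance slightly towards the overall variance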
@profile
def _run(self, id_root, recursive):
self.__update_pass(id_root, id_except=None, recursive=recursive)
@profile
def __update_pass(self, id_node, id_except, recursive):
g = self._graph
func_node = lambda id, node: self.__update_prior(id, node)
func_edge = lambda id1, id2, edge: self.__update_distrib(id1, id2, edge)
g.func_iter(id_node, recursive, func_node, func_edge)
@profile
def __update_prior(self, id_node, node):
if node.prior is not None:
i = self._inference
messages_prod = i.get_messages_prod(id_node, id_except=None)
if self.print_distrib:
self._print('update prior of id_node={}'.format(id_node))
#self._print(' BEFORE: ' + node.prior.as_str())
node.prior.update_prior(messages_prod, N_0=self.N_0)
# if self.print_distrib:
# self._print(' AFTER: ' + node.prior.as_str())
node.q_dirty = True
@profile
def __update_distrib(self, id_node1, id_node2, edge):
i = self._inference
messages_prod1 = i.get_messages_prod(id_node1, id_except=id_node2) # M1: N x k1
messages_prod2 = i.get_messages_prod(id_node2, id_except=id_node1) # M2: N x k2
distrib = edge.distrib # distrib: k1 x k2
if self.print_distrib:
self._print('update distrib of id_nodes=({},{})'.format(id_node1, id_node2))
#self._print(' BEFORE: ' + distrib.as_str())
distrib.update_cpd(messages_prod1, messages_prod2, N_0=self.N_0)
# if self.print_distrib:
# self._print(' AFTER: ' + distrib.as_str())
self._graph.node(id_node1).q_dirty = True
self._graph.node(id_node2).q_dirty = True
class ParameterLearning(GraphManipulator):
"""This class handles the parameter update step."""
def __init__(self, graph, data, inference, parameter_update):
super(ParameterLearning, self).__init__(graph)
assert isinstance(data, Data)
assert isinstance(inference, BeliefPropagation)
assert isinstance(parameter_update, ParameterUpdate)
self._prop_graph = set()
self._prop_node = set()
self._prop_edge = set()
self._register_properties()
# requires run() from the inference algorithm and parameter_updates
self._data = data
self._inference = inference
self._parameter_update = parameter_update
self.lklhd_mindiff = 1e-3
self.count_max = 100
self.restarts = 2
self.print_restarts = True
self.print_every = 10
self.print_em = False
self.restart_recursive = True
self.draw_density = False
self._copy_prop_node = {'prior'}
self._copy_prop_edge = {'distrib'}
@profile
def _run(self, id_root, recursive):
result = self._find_best_restart(id_root, recursive)
return result
@profile
def find_best_k(self, id_root):
g = self._graph
K = 2
step_log = -1
increase = True
K_max = 64
K_best = None
bic_best = -np.inf
cont = True
print_restarts_bak = self.print_restarts
self.print_restarts = False
while K <= K_max:
g.set_k(id_root, K)
self._data.run(id_root, recursive=False)
lklhd = self._find_best_restart(id_root, recursive=False)
# calculate BIC
dof = g.get_dof(id_root)
bic = g.N * lklhd - (dof * np.log(g.N) / 2)
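# BIC (larger is better): N times the per-sample log-likelihood minus the
# dof/2 * log(N) complexity penalty (lklhd is assumed to be per-sample here)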
self._print('binary search: K={}, BIC={}, lklhd={}, dof={}, N={}'.format(K, bic, lklhd, dof, g.N))
if bic > bic_best:
K_best = K
bic_best = bic
forward = True
else:
increase = False
forward = False
step_log += 1 if increase else -1
if step_log < 0:
break
step = np.power(2,step_log)
K += step if forward else -step
pass
self._print('binary search: K_best={}, BIC_best={}'.format(K_best, bic_best))
g.set_k(id_root, K_best)
self._data.run(id_root, recursive=False)
self.print_restarts = print_restarts_bak
lklhd = self._find_best_restart(id_root, recursive=False)
return lklhd
@profile
def _find_best_restart(self, id_root, recursive):
if not recursive:
restart_recursive = False
else:
restart_recursive = self.restart_recursive
if self.restarts > 0:
lklhd = self.__expectation_maximization(id_root, restart_recursive)
lklhd_best = lklhd
num_best = 0
lklhds = [lklhd]
self.__parameters_copy(id_root, restart_recursive)
for num in range(1, self.restarts+1):
self._data.run(id_root, restart_recursive)
lklhd = self.__expectation_maximization(id_root, restart_recursive)
lklhds.append(lklhd)
if lklhd > lklhd_best:
lklhd_best = lklhd
num_best = num
self.__parameters_copy(id_root, restart_recursive)
lklhd = lklhd_best
self.__parameters_recover_copy(id_root, restart_recursive)
lklhd2 = self._inference.run(id_root, restart_recursive)
assert np.allclose(lklhd, lklhd2) # just to double-check
if self.print_restarts:
self._print('EM restarts: id_root={}, best of {} is {} with lklhd={} (max diff={}); recursive={}; restart_recursive={}'.format(id_root, len(lklhds), num_best+1, lklhd, lklhd - min(lklhds), recursive, restart_recursive))
# finally do a full recursive run
if ((not restart_recursive) and recursive) or (self.restarts == 0):
lklhd = self.__expectation_maximization(id_root, recursive)
return lklhd
def __parameters_copy(self, id_root, recursive):
g = self._graph
g.properties_func(id_root, GraphObject.copy, recursive, self._copy_prop_node, self._copy_prop_edge)
def __parameters_recover_copy(self, id_root, recursive):
g = self._graph
#g.properties_func(id_root, GraphObject.recover_copy, recursive, self.__copy_prop_node, self.__copy_prop_edge)
def func_node(id, node):
GraphObject.recover_copy(node, self._copy_prop_node)
node.q_dirty = True
def func_edge(id1, id2, edge):
GraphObject.recover_copy(edge, self._copy_prop_edge)
g.func_iter(id_root, recursive=recursive, func_node=func_node, func_edge=func_edge)
@profile
def __expectation_maximization(self, id_root, recursive):
lklhd_last = self._inference.run(id_root, recursive)
if self.print_em:
self._print('id_root={}, recursive={}, lklhd={}'.format(id_root, recursive, lklhd_last))
if self.draw_density:
self.__draw_density(id_root)
count = 1
continue_condition = True
while continue_condition:
self._parameter_update.run(id_root, recursive)
lklhd = self._inference.run(id_root, recursive)
lklhd_diff = lklhd - lklhd_last
condition_lklhd = lklhd_diff > self.lklhd_mindiff
condition_count = count < self.count_max
continue_condition = condition_count & condition_lklhd
if self.print_em:
if (not continue_condition) or (count % self.print_every == 0) or (lklhd_diff < 0):
self._print('count={}, lklhd={}, lklhd_diff={}'.format(count, lklhd, lklhd_diff))
if self.draw_density:
self.__draw_density(id_root)
if not condition_lklhd:
self._print('Terminate because lklhd_diff <= mindiff.')
if not condition_count:
self._print('Terminate because count_max is reached.')
count += 1
lklhd_last = lklhd
if self.draw_density:
plt.cla()
#self.__draw_density(id_root)
return lklhd
def __draw_density(self, id_root):
# draw the density of the first child, if it is continuous
children = [id for id in self._graph.out_edges_iter(id_root)]
if len(children) > 0 and self._graph.node(children[0]).k == 1:
self._inference.visual_gauss_edge(id_root, children[0])
plt.gca().set_prop_cycle(None)
class StructureUpdate(GraphManipulator):
"""Updates graph structure."""
def __init__(self, graph, data, inference, parameter_learning):
super(StructureUpdate, self).__init__(graph)
assert isinstance(data, Data)
self._data = data
assert isinstance(inference, BeliefPropagation)
self._inference = inference
assert isinstance(parameter_learning, ParameterLearning)
self._parameter_learning = parameter_learning
self.required_axes = 2 # for lklhd_pot_diff and lklhd_pot_diff_siblings
# updates graph structure
self.k_default = 10 # default k for new hidden nodes
self.N_0 = 1 # should be the same N_0 as in ParameterUpdate, should find better solution to share the same value
self.__lklhd_pot_diff = np.zeros((0, 0))
self.lklhd_pot_diff_root = np.zeros((0,))
self.lklhd_pot_diff_siblings = np.zeros((0, 0))
self.lklhd_pot_diff_dirty = np.zeros((0,), dtype=bool)
self.is_observed = np.zeros((0,))
self.draw_figure = False
self.show_value_text = False
self.keep_old_parents = False
self.max_of_cp_and_pc = False
self.balance_k = False # balance the lklhd_pot_diff value according K of each node; does not seem to work well
self.find_best_k = False
@profile
def add_gaussian_parents(self):
# add gaussian parents
nodes = self._graph.nodes()
for id_node in nodes:
self.__gaussian_add_parent(id_node)
@profile
def remove_gaussian_parents(self):
g = self._graph
num_removed = 0
# can't use iterator, since nodes are deleted while iterating
nodes_data = g.nodes(data=True)
for id_node, node in nodes_data:
if node.k == 1:
id_parent = g.get_parent(id_node)
if self._is_single_continuous_parent(id_parent):
g.remove_edge(id_parent, id_node)
g.remove_node(id_parent)
num_removed += 1
self._print('removed {} gaussian parents'.format(num_removed))
@profile
def run(self):
# TODO:
# - proper update for removing edges: we need to calculate the lklhd_diff for the siblings of the
# parent_old as regularization condition
# - lazy calculation of lklhd_pot_diff and lklhd_pot_diff_siblings (V)
# - speedup of get_messages_prod()
# - speedup of update_lklhd_pot_diff!!!
i = self._inference
g = self._graph
lklhd_old = i.get_lklhd_all()
# self._print('lklhd_pot_diff_dirty={}'.format(np.where(self.lklhd_pot_diff_dirty)[0]))
self.update_lklhd_pot_diff()
if self.draw_figure:
g.draw_axes()
self.draw_axes()
plt.show()
id_parent, id_child, create_node = self.select_update()
if id_parent is not None:
same_tree = g.get_root(id_parent) == g.get_root(id_child)
pot_diff = self.__lklhd_pot_diff[id_parent, id_child]
else:
same_tree = True
pot_diff = None
self._print("select=({},{}); create_node={}; same_tree={}; pot_diff={}".format(id_parent, id_child, create_node, same_tree, pot_diff))
id_roots_changed = set()
if (id_parent is not None) or (id_child is not None):
assert id_child is not None
# update dirty
self._update_lklhd_pot_diff_dirty(id_parent)
self._update_lklhd_pot_diff_dirty(id_child)
#pot_diff = self.lklhd_pot_diff[id_parent, id_child]
# this fails if (id_parent = None) and child.k == 1
child_is_gauss = g.node(id_child).k == 1
if id_parent is None and child_is_gauss:
distrib_new = None
else:
_, _, distrib_new = self.__calc_lklhd_pot(id_parent, id_child)
id_roots_changed |= self.__remove_old_parent(id_child)
if not create_node:
# we need to add an edge to the new parent
self.__add_edge_and_init(id_parent, id_child, distrib_new)
else:
assert id_parent is not None # should never happen that we create a new node and only add a single child
# first remove the possible old_parent of parent
id_roots_changed |= self.__remove_old_parent(id_parent)
# we need to create a new hidden node and two new edges
id_parent_2nd = g.add_node(self.k_default)
g.add_edge(id_parent_2nd, id_parent)
g.add_edge(id_parent_2nd, id_child)
# TODO: do here non-recursive parameter optimization; then we should be able to guarantee the lklhd_diff in the assert below!
if self.find_best_k:
self._parameter_learning.find_best_k(id_parent_2nd)
else:
self._data.run(id_parent_2nd, recursive=False)
id_roots_changed |= {g.get_root(id_child)}
self._print('id_roots_changed={}'.format(id_roots_changed))
i.run(id_roots_changed) # run inference on all changed trees
lklhd = i.get_lklhd_all()
# double-check that our predicted difference is close to the real difference
lklhd_diff = lklhd - lklhd_old
if (not create_node) and (not same_tree):
# if we create a new node, then the increase is only guaranteed after parameter optimization
# assert np.isclose(lklhd_diff, pot_diff) or (lklhd_diff - pot_diff > 0)
if not (np.isclose(lklhd_diff, pot_diff) or (lklhd_diff - pot_diff > 0)):
warnings.warn("lklhd_diff - pot_diff = {}".format(lklhd_diff - pot_diff))
# probably happens when an old_parent is removed and a new edge from parent_parent to sibling is created:
# the edge nodes are within the same tree and thus the true distribution needs to be obtained by sending a message between them -> costly!
else:
lklhd = None
return lklhd, id_roots_changed
@profile
def __add_edge_and_init(self, id_node1, id_node2, distrib_new):
# TODO: implement this function and use it every time a new edge is added.
# Maybe: use similar function for initializing new node.
g = self._graph
i = self._inference
if id_node1 is not None:
g.add_edge(id_node1, id_node2)
g.edge(id_node1, id_node2).distrib = distrib_new
# init the edge messages, for the rare case that afterwards another __remove_old_parent() is called that
# uses the same node
i.run(id_roots=id_node1, recursive=False)
else:
# id_node2 might be gaussian, then we need to add a new parent
id_parent_new = self.__gaussian_add_parent(id_node2)
if id_parent_new is None:
# if not, then we can initialize with distrib_new
g.node(id_node2).prior = distrib_new
else:
self._data.run(id_roots=id_parent_new, recursive=False)
@profile
def __remove_old_parent(self, id_node):
g = self._graph
id_parent = g.get_parent(id_node)
id_roots_changed = set()
if id_parent is not None:
# we need to remove the edge to the old parent first
g.remove_edge(id_parent, id_node)
if self.keep_old_parents:
# nothing else to do, since all parents are kept
return {g.get_root(id_parent)}
out_degree_parent = g.out_degree(id_parent)
id_parent_parent = g.get_parent(id_parent)
if id_parent_parent is not None:
parent_has_parent = True
else:
parent_has_parent = False
if out_degree_parent == 1:
id_sibling = [id for id in g.out_edges_iter(id_parent)][0]
sibling_continuous = g.node(id_sibling).x is not None
else:
id_sibling = None
sibling_continuous = False
parent = g.node(id_parent)
if out_degree_parent <= 1 and not (out_degree_parent==1 and not parent_has_parent and sibling_continuous):
# case 1: out_degree_parent==0 & parent_has_parent -> (1a) delete edge between parent_parent and parent (1b) delete node parent
# case 2: out_degree_parent==0 & ~parent_has_parent -> (2a) delete node parent
# case 3: out_degree_parent==1 & parent_has_parent -> (3a) delete edge between parent_parent and parent, (3b) remove edge between parent and sibling, (3c) add edge between parent_parent and sibling (3d) delete node parent
# (V) case 4: out_degree_parent==1 & ~parent_has_parent & sibling_continuous -> do nothing
# case 5: out_degree_parent==1 & ~parent_has_parent & ~sibling_continuous -> (4a) delete edge between parent and sibling and (4b) delete node parent
if parent_has_parent:
# (1a, 3a) delete edge between parent_parent and parent
if out_degree_parent==1:
g.remove_edge(id_parent_parent, id_parent)
# (3b) remove edge between parent and sibling
_, _, distrib_new = self.__calc_lklhd_pot(id_parent_parent, id_sibling)
g.remove_edge(id_parent, id_sibling)
# (3c) add edge between parent_parent and sibling
self.__add_edge_and_init(id_parent_parent, id_sibling, distrib_new)
id_roots_changed |= {g.get_root(id_parent_parent)}
else:
# recursively remove the old parent
id_roots_changed |= self.__remove_old_parent(id_parent)
else:
if out_degree_parent==1:
assert not sibling_continuous # just to double check, this case should have been excluded in the first 'if'
# (4a) delete edge between parent and sibling
g.remove_edge(id_parent, id_sibling)
id_roots_changed |= {id_sibling}
# (1b, 2a, 3d, 4b) delete node parent
g.remove_node(id_parent)
else:
id_roots_changed |= {g.get_root(id_parent)}
return id_roots_changed
@profile
def __gaussian_add_parent(self, id_node):
g = self._graph
node = g.node(id_node)
id_parent = None
if node.k == 1:
# only for gaussian nodes
if not g.has_parent(id_node):
# add new hidden parent if node does not have one
id_parent = self.__add_node_with_children([id_node])
return id_parent
@profile
def __add_node_with_children(self, id_children):
g = self._graph
id_node = g.add_node(self.k_default)
for id_child in id_children:
g.add_edge(id_node, id_child)
return id_node
@profile
def add_nodes_from_adjlist(self, adjlist):
# adjlist must obey the rules:
# - already existing nodes must have no children
# - new nodes must have ids in increasing order
# - ids are only allowed as child if they have been defined before
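# Example (hypothetical) adjacency list obeying these rules:
#   adjlist = [[0], [1], [2, 0, 1]]
# nodes 0 and 1 already exist (no children listed); node 2 is new and becomes
# the hidden parent of 0 and 1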
self._print('adding {} nodes from adjacency list'.format(len(adjlist)))
ide2id = {}
for line in adjlist:
ide_node = line[0]
ide_children = line[1:]
# these checks assume a tree structure
if self._graph.has_node(ide_node):
# if node already exists, then it must be one of the data input ones and thus doesn't have children
assert not ide_children
ide2id[ide_node] = ide_node
else:
# if node does not exist yet, then it should be one of the hidden nodes and thus must have children
if ide_children:
# map ide to id
id_children = [ide2id[ide] for ide in ide_children if ide2id[ide] is not None]
id_node = self.__add_node_with_children(id_children)
ide2id[ide_node] = id_node
else:
# otherwise ignore node
ide2id[ide_node] = None
self._print('ide_node={} does not have children, leave it out'.format(ide_node))
# # assert id consistency
# assert ide_node == id_node
pass
@profile
def update_lklhd_pot_diff(self):
g = self._graph
i = self._inference
id_node_next = g.get_id_node_next()
self.__lklhd_pot_diff = expand_array(self.__lklhd_pot_diff, (id_node_next, id_node_next))
self.lklhd_pot_diff_root = expand_array(self.lklhd_pot_diff_root, (id_node_next,))
self.lklhd_pot_diff_siblings = expand_array(self.lklhd_pot_diff_siblings, (id_node_next, id_node_next))
self.is_observed = expand_array(self.is_observed, (id_node_next,))
self.lklhd_pot_diff_dirty = expand_array(self.lklhd_pot_diff_dirty, (id_node_next,), True)
pl = ProgressLine(prefix=self._print_prefix() + "update_lklhd_pot_diff ")
for id_child in range(id_node_next):
# case of new root node (i.e. removing the actual parent)
if self.lklhd_pot_diff_dirty[id_child]:
if self.is_allowed_root(id_child):
diff_root, _ = self.calc_lklhd_pot_diff(None, id_child)
else:
diff_root = np.nan
self.lklhd_pot_diff_root[id_child] = diff_root
# case of adding new edge
if self.is_allowed_child(id_child):
for id_parent in range(id_node_next):
if self.lklhd_pot_diff_dirty[id_parent] or self.lklhd_pot_diff_dirty[id_child]:
# self._print('update lklhd_pot_diff({},{})'.format(id_parent, id_child))
if self.is_allowed(id_parent, id_child):
diff, message_child2parent_pot = self.calc_lklhd_pot_diff(id_parent, id_child)
diff_siblings = self.calc_lklhd_pot_diff_siblings(id_parent, message_child2parent_pot)
if diff_siblings.size == 0:
diff_siblings_min = np.nan # nan for no siblings
else:
diff_siblings_min = np.min(diff_siblings)
if g.has_parent(id_child):
if self.is_allowed_root(id_child):
# we need to add the diff_root, since the old parent will be removed
diff_root = self.lklhd_pot_diff_root[id_child]
assert not np.isnan(diff_root)
diff += diff_root
else:
diff = np.nan
diff_siblings_min = np.nan
self.__lklhd_pot_diff[id_parent, id_child] = diff
self.lklhd_pot_diff_siblings[id_parent, id_child] = diff_siblings_min
else:
self.__lklhd_pot_diff[:, id_child] = np.nan
self.lklhd_pot_diff_siblings[:, id_child] = np.nan
perc = int(np.round(100 * (id_child + 1) / id_node_next))
pl.progress(perc)
pl.finish()
# fill in the is_observed variable
#parents = g.nodes_iter()
for id_parent in range(id_node_next):
if g.has_node(id_parent):
if g.node(id_parent).x is not None:
is_observed = True
else:
is_observed = False
else:
is_observed = np.nan
self.is_observed[id_parent] = is_observed
self.lklhd_pot_diff_dirty = np.zeros_like(self.lklhd_pot_diff_dirty, dtype=bool)
@profile
def calc_lklhd_pot_diff(self, id_parent_pot, id_child):
i = self._inference
lklhd = i.get_lklhd(id_child)
lklhd_pot, message_child2parent_pot, _ = self.__calc_lklhd_pot(id_parent_pot, id_child)
lklhd_pot = np.mean(lklhd_pot)
lklhd_pot_diff = lklhd_pot - lklhd
return lklhd_pot_diff, message_child2parent_pot
@profile
def calc_lklhd_pot_diff_sibling(self, id_parent_pot, id_sibling, message_child2parent_pot):
i = self._inference
lklhd = i.get_lklhd(id_sibling)
lklhd_pot = i.calc_lklhd_parent(id_sibling, id_parent_pot, message_child2parent_pot)
lklhd_pot = np.mean(lklhd_pot)
lklhd_pot_diff = lklhd_pot - lklhd
return lklhd_pot_diff
@profile
def calc_lklhd_pot_diff_siblings(self, id_parent_pot, message_child2parent_pot):
g = self._graph
edges = g.out_edges_iter(id_parent_pot)
lklhd_pot_diff = [self.calc_lklhd_pot_diff_sibling(id_parent_pot, id_sibling, message_child2parent_pot) for id_sibling in edges]
return np.array(lklhd_pot_diff)
@profile
def _get_lklhd_pot_diff(self):
lklhd_pot_diff = self.__lklhd_pot_diff.copy()
if self.max_of_cp_and_pc:
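# take the elementwise max of lklhd_pot_diff[p, c] and lklhd_pot_diff[c, p]
# where both are defined; flattening the square matrix and its transpose
# aligns entry (p, c) with entry (c, p)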
tmp = lklhd_pot_diff.flatten()
tmpT = lklhd_pot_diff.T.flatten()
idx = (~np.isnan(tmp)) & (~np.isnan(tmpT))
tmp[idx] = np.maximum(tmp[idx], tmpT[idx])
tmp = np.reshape(tmp, lklhd_pot_diff.shape)
lklhd_pot_diff = tmp
if self.balance_k:
k = self._get_node_infos(self._k)
lklhd_pot_diff *= np.sqrt(k[:,np.newaxis]) * np.sqrt(k[np.newaxis,:])
return lklhd_pot_diff
@profile
def __calc_lklhd_pot_messages(self, messages_prod1, messages_prod2, message_1to2_old):
"""
:param messages_prod1: q(parent_pot) (WARNING: this variable is changed in-place)
:param messages_prod2: beta(child)
:param message_1to2_old: alpha^before(child)
:return:
"""
if messages_prod1 is not None:
k1 = messages_prod1.get_k2()
else:
k1 = 1
k2 = messages_prod2.get_k2()
distrib = DistribFactory.empty(k1, k2) # distrib: k1 x k2
messages_prod2_all = messages_prod2.copy()
messages_prod2_all.prod([message_1to2_old]) # q(child)
if messages_prod1 is not None:
# exclude_self needs to be true, since we calculate the CPD without having an edge between the nodes
distrib.update_cpd(messages_prod1, messages_prod2_all, N_0=self.N_0, exclude_self=True)
# node is gaussian -> we need to sum over parent_pot
# (if none of the above applies, then we can sum over either node or parent_pot)
# we need message node -> parent_pot, i.e. we calculate the lklhd from the parent
message_child2parent_pot = messages_prod2.dot_transpose(distrib)
messages_prod1.set_log_const_zero()
messages_prod1.prod([message_child2parent_pot])
messages_prod = messages_prod1
else:
distrib.update_prior(messages_prod2_all, N_0=self.N_0)
# parent_pot is None -> we need to sum over node
# no message exists and we only need to multiply with the prior
messages_prod2.prod([distrib])
messages_prod = messages_prod2
message_child2parent_pot = None
lklhd = messages_prod.get_log_const()
return (lklhd, message_child2parent_pot, distrib)
@profile
def __calc_lklhd_pot(self, id_parent_pot, id_child):
g = self._graph
i = self._inference
id_parent = g.get_parent(id_child) # might be None
# get potential distrib for (parent_pot, node)
# the child needs to remove its old parent message (either from parent or prior)
messages_prod2 = i.get_messages_prod(id_child, id_except=id_parent, include_prior=False) # M2: N x k2
if id_parent is not None:
message_1to2_old = i.get_message(id_parent, id_child)
else:
message_1to2_old = g.node(id_child).prior
if id_parent_pot is not None:
# the potential parent uses all its incoming messages
messages_prod1 = i.get_messages_prod(id_parent_pot, id_except=None) # M1: N x k1
else:
messages_prod1 = None
lklhd, message_child2parent_pot, distrib = self.__calc_lklhd_pot_messages(messages_prod1, messages_prod2, message_1to2_old)
return lklhd, message_child2parent_pot, distrib
def _update_lklhd_pot_diff_dirty(self, id_node):
if id_node is None:
return
g = self._graph
id_root = g.get_root(id_node)
def func_node(id_node_, node_):
self.lklhd_pot_diff_dirty[id_node_] = True
g.func_iter(id_roots=id_root, func_node=func_node)
@profile
def draw(self):
fig, ax = plt.subplots(1,self.required_axes)
if not isinstance(ax, collections.Iterable):
ax = [ax]
self.draw_axes(ax)
plt.show()
@profile
def draw_axes(self, ax=None):
# concatenate lklhd_pot_diff and lklhd_pot_diff_root
lpd = self.__lklhd_pot_diff
lpdr = self.lklhd_pot_diff_root[np.newaxis,:]
pad = np.full_like(lpdr, np.nan)
data = np.concatenate((lpd, pad, lpdr), axis=0)
lpds = self.lklhd_pot_diff_siblings
if ax is None:
ax = self._graph.get_axes(self.id_axes)
assert len(ax) == self.required_axes
# imshow lklhd_pot_diff
ax[0].set_anchor('N')
imshow_values(data, ax[0], show_value_text=self.show_value_text)
# imshow lklhd_pot_diff_siblings
ax[1].set_anchor('N')
imshow_values(lpds, ax[1], show_value_text=self.show_value_text)
@profile
def _is_single_continuous_parent(self, id_node):
g = self._graph
if g.has_node(id_node):
id_children = [x for x in g.out_edges_iter(id_node)]
result = (len(id_children) == 1) and (g.node(id_children[0]).k ==1)
else:
result = False
return result
@profile
def _is_observed(self, id_node):
g = self._graph
if g.has_node(id_node):
result = g.node(id_node).x is not None
else:
result = False
return result
@profile
def _layer(self, id_node):
g = self._graph
if g.has_node(id_node):
result = g.node(id_node).layer
else:
result = np.Inf
return result
@profile
def _k(self, id_node):
g = self._graph
if g.has_node(id_node):
result = g.node(id_node).k
else:
result = np.NaN
return result
@profile
def _get_node_infos(self, func):
num_nodes = self.__lklhd_pot_diff.shape[0]
l = [func(id_node) for id_node in range(num_nodes)]
ar = np.array(l)
return ar
# decorator substitute that is pickle-safe
select_max = NanSelect(select_max_undecorated)
select_weighted_random = NanSelect(select_weighted_random_undecorated)
select_random = NanSelect(select_random_undecorated)
select_random_metropolis = NanSelect(select_random_metropolis_undecorated)
@profile
def is_allowed_child(self, id_child):
return True
@profile
def is_allowed_root(self, id_child):
return False
class StructureUpdatePredefined(StructureUpdate):
def __init__(self, graph, data, inference, parameter_learning):
super(StructureUpdatePredefined, self).__init__(graph, data, inference, parameter_learning)
self._register_properties()
self.adjlist = None
self.__done = False
def run(self):
if self.adjlist is None:
raise ValueError('The variable adjlist is empty, but an adjacency list was expected!')
if not self.__done:
self.remove_gaussian_parents()
self.add_nodes_from_adjlist(self.adjlist)
self._data.run()
lklhd = self._inference.run()
self.__done = True
# set restart_recursive to True (otherwise we would only learn a part of the tree)
self._parameter_learning.restart_recursive = True
else:
# already done adding nodes
lklhd = None
id_roots_changed = None
return lklhd, id_roots_changed
#ToDo:
# Implement within run() method:
# (1) remove gaussian parents (opposite of add_gaussian_parents())
# (2) add_nodes_from_adjlist
# (3) init distributions
class StructureUpdateCVPR2015(StructureUpdate):
"""This class handles the parameter update step."""
def __init__(self, graph, data, inference, parameter_learning):
super(StructureUpdateCVPR2015, self).__init__(graph, data, inference, parameter_learning)
self._register_properties()
self.lklhd_mindiff = 0.01
self.lklhd_mindiff_siblings = -0.1
self.select_strategy = StructureUpdate.select_max
self.new_node_layer_plusone = True
self.force_full_tree = False
@profile
def is_allowed_child(self, id_child):
g = self._graph
if not g.has_node(id_child):
return False
child = g.node(id_child)
if not g.is_root(id_child):
# check if child's parent is a single parent of a continuous node
if child.k == 1 and g.out_degree(g.get_parent(id_child)) == 1:
return True
else:
return False
# check if child is a single parent of a continuous node
if g.out_degree(id_child) == 1:
# this check is sufficient, since all other nodes have either degree=0 or degree>1
return False
return True
@profile
def is_allowed(self, id_parent, id_child):
g = self._graph
if not g.has_node(id_parent):
return False
if not g.has_node(id_child):
return False
parent = g.node(id_parent)
child = g.node(id_child)
if parent.k == 1:
# continuous parents are not allowed
return False
if id_parent == id_child:
# parent and child must be different
return False
if g.get_root(id_parent) == g.get_root(id_child):
# parent and child must be in different subtrees
return False
if not g.is_root(id_parent):
return False
if not g.is_root(id_child):
# check if child's parent is a single parent of a continuous node
if child.k == 1 and g.out_degree(g.get_parent(id_child)) == 1:
return True
else:
return False
# check if child is a single parent of a continuous node
if g.out_degree(id_child) == 1:
# this check is sufficient, since all other nodes have either degree=0 or degree>1
return False
return True
@profile
@staticmethod
def _calc_closed(lklhd_pot_diff, cond_is_observed):
changed = True
is_closed = np.all(np.isnan(lklhd_pot_diff), axis=1) # closed are the nodes that cannot add any other node as child
# observed nodes are always closed
is_closed = is_closed | cond_is_observed
count = 0
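# iterate until stable: open nodes are excluded as candidate children, and a
# node becomes closed once it has no closed candidate child left to add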
while changed:
lklhd_pot_diff_excl_open_children = lklhd_pot_diff.copy()
lklhd_pot_diff_excl_open_children[:, ~is_closed] = np.nan # open nodes cannot be added as child
is_closed_new = np.all(np.isnan(lklhd_pot_diff_excl_open_children), axis=1)
is_closed_new = is_closed | is_closed_new
if np.all(is_closed == is_closed_new):
changed = False
is_closed = is_closed_new
count += 1
assert count < 999 # just to debug, this should never fail
return is_closed
@profile
def _get_cond_mindiff(self, lklhd_pot_diff):
with np.errstate(invalid='ignore'):
cond_mindiff = np.isnan(lklhd_pot_diff) | (lklhd_pot_diff > self.lklhd_mindiff)
cond_siblings_mindiff = np.isnan(self.lklhd_pot_diff_siblings) | \
(self.lklhd_pot_diff_siblings > self.lklhd_mindiff_siblings)
return cond_mindiff, cond_siblings_mindiff
@profile
def select_update(self):
lklhd_pot_diff = self._get_lklhd_pot_diff()
is_observed = self._get_node_infos(self._is_observed)
layer = self._get_node_infos(self._layer)
cond_mindiff, cond_siblings_mindiff = self._get_cond_mindiff(lklhd_pot_diff)
# exclude edges that do not fulfill cond_mindiff
lklhd_pot_diff[~(cond_mindiff)] = np.nan
is_closed = self._calc_closed(lklhd_pot_diff, is_observed)
lklhd_pot_diff[:,~is_closed] = np.nan # exclude children that are open
eligible_cond = ~np.isnan(lklhd_pot_diff)
layer_max = np.maximum(layer[:, np.newaxis], layer[np.newaxis, :])
# new node is created either if cond_siblings is not fulfilled or if the new parent is observed
new_node_cond = (~cond_siblings_mindiff) | is_observed[:,np.newaxis]
if self.new_node_layer_plusone:
# if we create a new node, then it will be one layer higher
# this is different from the original cvpr algorithm
layer_max[new_node_cond] += 1
if np.any(eligible_cond):
layer_min_eligible = np.min(layer_max[eligible_cond])
# priority_cond = (~new_node_cond) & (~is_single_continuous_parent[:,np.newaxis])
priority_cond = layer_max == layer_min_eligible
if np.any(eligible_cond & priority_cond):
# check if we can continue within the priority conditions
lklhd_pot_diff[~priority_cond] = np.nan
id_parent, id_child = self.select_strategy(lklhd_pot_diff)
create_node = new_node_cond[id_parent, id_child]
else:
id_parent, id_child, create_node = None, None, None
# debug printout
lklhd_pot_diff_orig = self._get_lklhd_pot_diff()
if np.any(~np.isnan(lklhd_pot_diff_orig)):
id_parent_max, id_child_max = StructureUpdate.select_max(lklhd_pot_diff_orig)
self._print('select_max=({},{}); lklhd_pot_diff={}; is_closed[id_child_max]={}'.format(
id_parent_max, id_child_max, lklhd_pot_diff_orig[id_parent_max, id_child_max],
is_closed[id_child_max]))
if self.force_full_tree:
id_parent, id_child = id_parent_max, id_child_max
create_node = new_node_cond[id_parent, id_child]
else:
self._print('select_max=(None,None)')
return id_parent, id_child, create_node
class StructureUpdateCVPR2015Closer(StructureUpdateCVPR2015):
"""This class handles the parameter update step."""
def __init__(self, graph, data, inference, parameter_learning):
super(StructureUpdateCVPR2015Closer, self).__init__(graph, data, inference, parameter_learning)
self._register_properties()
self.__is_closed_last = np.zeros((0,))
@profile
def update_lklhd_pot_diff(self):
super(StructureUpdateCVPR2015Closer, self).update_lklhd_pot_diff()
# get is_closed array
lklhd_pot_diff = self._get_lklhd_pot_diff()
is_observed = self._get_node_infos(self._is_observed)
cond_mindiff, _ = self._get_cond_mindiff(lklhd_pot_diff)
lklhd_pot_diff[~(cond_mindiff)] = np.nan # exclude edges that do not fulfill cond_mindiff
is_closed = self._calc_closed(lklhd_pot_diff, is_observed)
# get newly closed nodes
id_newly_closed = set()
for id_root in self._graph.get_id_roots():
open_last = (id_root >= len(self.__is_closed_last)) or (not self.__is_closed_last[id_root])
open_now = (id_root >= len(is_closed)) or (not is_closed[id_root])
if open_last and not open_now:
id_newly_closed.add(id_root)
self.__is_closed_last = is_closed
if id_newly_closed:
id_newly_closed = sorted(id_newly_closed)
self._print("recursive EM on id_newly_closed={}".format(id_newly_closed))
for id_root in id_newly_closed:
self._parameter_learning.run(id_roots=id_root, recursive=True)
self._update_lklhd_pot_diff_dirty(id_root)
super(StructureUpdateCVPR2015Closer, self).update_lklhd_pot_diff()
@profile
def select_update(self):
lklhd_pot_diff = self._get_lklhd_pot_diff()
is_observed = self._get_node_infos(self._is_observed)
layer = self._get_node_infos(self._layer)
cond_mindiff, cond_siblings_mindiff = self._get_cond_mindiff(lklhd_pot_diff)
lklhd_pot_diff[~(cond_mindiff)] = np.nan # exclude edges that do not fulfill cond_mindiff
is_closed = self._calc_closed(lklhd_pot_diff, is_observed)
lklhd_pot_diff[:,~is_closed] = np.nan # exclude children that are open
eligible_cond = ~np.isnan(lklhd_pot_diff)
layer_max = np.maximum(layer[:, np.newaxis], layer[np.newaxis, :])
# new node is created either if cond_siblings is not fulfilled or if the new parent is observed
new_node_cond = (~cond_siblings_mindiff) | is_observed[:,np.newaxis]
if self.new_node_layer_plusone:
# if we create a new node, then it will be one layer higher
# this is different from the original cvpr algorithm
layer_max[new_node_cond] += 1
if np.any(eligible_cond):
layer_min_eligible = np.min(layer_max[eligible_cond])
# priority_cond = (~new_node_cond) & (~is_single_continuous_parent[:,np.newaxis])
priority_cond = layer_max == layer_min_eligible
if np.any(eligible_cond & priority_cond):
# check if we can continue within the priority conditions
lklhd_pot_diff[~priority_cond] = np.nan
id_parent, id_child = self.select_strategy(lklhd_pot_diff)
create_node = new_node_cond[id_parent, id_child]
else:
id_parent, id_child, create_node = None, None, None
return id_parent, id_child, create_node
class StructureUpdateSampling(StructureUpdate):
"""This class handles the parameter update step."""
# TODO: implement case to change edges within the same tree
def __init__(self, graph, data, inference, parameter_learning):
super(StructureUpdateSampling, self).__init__(graph, data, inference, parameter_learning)
self._register_properties()
self.select_strategy = StructureUpdate.select_weighted_random
@profile
def is_allowed(self, id_parent, id_child):
g = self._graph
if not g.has_node(id_parent):
return False
if not g.has_node(id_child):
return False
if id_parent == id_child:
# parent and child must be different
return False
parent = g.node(id_parent)
if parent.k == 1:
# continuous parents are not allowed
return False
child = g.node(id_child)
if not ((parent.layer == child.layer) or (parent.layer == child.layer+1)):
return False
if (parent.layer == child.layer):
# if a new node is created (i.e. parent.layer == child.layer), then the parent should not be a single parent of a continuous node
if g.out_degree(id_parent) == 1:
return False
# if g.get_root(id_parent) == g.get_root(id_child):
# # parent and child must be in different subtrees
# return False
if g.out_degree(id_child) == 1:
# the child should not be a single parent of a continuous node
# this check is sufficient, since all other nodes have either degree=0 or degree>1
return False
if g.has_edge(id_parent, id_child):
# the edge should not already be in the tree
return False
id_parent_parent = g.get_parent(id_parent)
if id_parent_parent == g.get_parent(id_child) and id_parent_parent is not None:
# the nodes should not have already the same parent
return False
return True
@profile
def is_allowed_root(self, id_child):
g = self._graph
if not g.has_node(id_child):
return False
if g.is_root(id_child):
return False
child = g.node(id_child)
if child.k == 1:
# continuous children without parent are not allowed
return False
return True
@profile
def select_update(self):
# TODO:
# - include removing parent option
# - better lklhd_pot_diff calculation: if old_parents are removed, this needs to be incorporated (for both, parent and child)
lklhd_pot_diff = self._get_lklhd_pot_diff()
# lklhd_pot_diff_root = self.lklhd_pot_diff_root.copy()
layer = self._get_node_infos(self._layer)
# maybe: include lklhd_pot_diff_root, i.e. the possibility to remove an edge
# if np.any(~np.isnan(lklhd_pot_diff)) or np.any(~np.isnan(lklhd_pot_diff_root)):
id_parent, id_child = self.select_strategy(lklhd_pot_diff)
# id_parent, id_child = self._select_random(lklhd_pot_diff)
if (id_parent is not None) and (id_child is not None):
create_node = layer[id_parent] == layer[id_child]
else:
create_node = None
return id_parent, id_child, create_node
class StructureUpdateSamplingFixedLayers(StructureUpdate):
"""This class handles the parameter update step."""
def __init__(self, graph, data, inference, parameter_learning):
super(StructureUpdateSamplingFixedLayers, self).__init__(graph, data, inference, parameter_learning)
self._register_properties()
self.select_strategy = StructureUpdate.select_weighted_random
self.__init_fixed_layers()
@profile
def __init_fixed_layers(self):
g = self._graph
id_node_next = g.get_id_node_next()
self.__layer = np.zeros((id_node_next,))
for id_node, node in g.nodes_iter(data=True):
self.__layer[id_node] = node.layer
@profile
def is_allowed(self, id_parent, id_child):
g = self._graph
if not g.has_node(id_parent):
return False
if not g.has_node(id_child):
return False
if id_parent == id_child:
# parent and child must be different
return False
parent = g.node(id_parent)
if parent.k == 1:
# continuous parents are not allowed
return False
if not (self.__layer[id_parent] > self.__layer[id_child]):
return False
if g.has_edge(id_parent, id_child):
# the edge should not already be in the tree
return False
return True
@profile
def select_update(self):
lklhd_pot_diff = self._get_lklhd_pot_diff()
id_parent, id_child = self.select_strategy(lklhd_pot_diff)
create_node = False
return id_parent, id_child, create_node
class StructureUpdateSamplingIterative(StructureUpdate):
"""This class handles the parameter update step."""
def __init__(self, graph, data, inference, parameter_learning):
super(StructureUpdateSamplingIterative, self).__init__(graph, data, inference, parameter_learning)
self._register_properties()
self.select_strategy = StructureUpdate.select_max
self.id_node_last = None
self.id_parent_last = None
self.id_nodes_next = []
self.__init_nodes_next()
self.__keep_old_parents_bak = None
@profile
def __init_nodes_next(self):
if not self.id_nodes_next:
self.id_nodes_next = []
for id_node in self._graph.nodes_iter():
if self.__is_allowed_root(id_node):
self.id_nodes_next.append(id_node)
# randomize order
random.shuffle(self.id_nodes_next)
@profile
def is_allowed(self, id_parent, id_child):
if self.id_node_last is None:
return False
if self.id_node_last != id_child:
return False
g = self._graph
if not g.has_node(id_parent):
return False
if not g.has_node(id_child):
return False
if id_parent == id_child:
# parent and child must be different
return False
parent = g.node(id_parent)
if parent.k == 1:
# continuous parents are not allowed
return False
if parent.x is not None:
# observed parents are not allowed
return False
if g.get_root(id_parent) == g.get_root(id_child):
# parent and child must be in different subtrees
return False
return True
@profile
def is_allowed_child(self, id_child):
if self.id_node_last is None:
return False
if self.id_node_last != id_child:
return False
g = self._graph
if not g.has_node(id_child):
return False
return True
@profile
def __is_allowed_root(self, id_child):
g = self._graph
if not g.has_node(id_child):
return False
if g.is_root(id_child):
return False
return True
@profile
def select_update(self):
lklhd_pot_diff = self._get_lklhd_pot_diff()
if self.id_node_last is not None:
id_parent, id_child = self.select_strategy(lklhd_pot_diff)
self.id_node_last = None
self.__keep_old_parents_bak = self.keep_old_parents
# in this step, it needs to be set to False, in order to remove single parents of a Gaussian node
self.keep_old_parents = False
if id_parent == self.id_parent_last:
self._print("connected back to previous parent")
else:
self._print("found new parent!")
else:
id_parent = None
id_child = self.id_nodes_next.pop(0)
self.id_node_last = id_child
if self.__keep_old_parents_bak is not None:
self.keep_old_parents = self.__keep_old_parents_bak
self.id_parent_last = self._graph.get_parent(id_child)
create_node = False
self.__init_nodes_next()
return id_parent, id_child, create_node
class StructureUpdateGreedyBinary(StructureUpdate):
"""This class handles the parameter update step."""
def __init__(self, graph, data, inference, parameter_learning):
super(StructureUpdateGreedyBinary, self).__init__(graph, data, inference, parameter_learning)
self._register_properties()
self.select_strategy = StructureUpdate.select_max
self.find_best_k = True
@profile
def is_allowed(self, id_parent, id_child):
g = self._graph
if not g.has_node(id_parent):
return False
if not g.has_node(id_child):
return False
if id_parent == id_child:
# parent and child must be different
return False
if not g.is_root(id_parent):
return False
if not g.is_root(id_child):
return False
return True
@profile
def is_allowed_root(self, id_child):
return False
@profile
def select_update(self):
lklhd_pot_diff = self._get_lklhd_pot_diff()
id_parent, id_child = self.select_strategy(lklhd_pot_diff)
if (id_parent is not None) and (id_child is not None):
create_node = True
else:
create_node = None
return id_parent, id_child, create_node
class LogEntry(object):
ATTR_ORDERED = ['count', 'num_roots', 'lklhd', 'lklhd_diff', 'structure_diff', 'parameter_diff', 'num_nodes']
def __init__(self, *args):
assert len(args) == len(self.ATTR_ORDERED)
for i, attr in enumerate(self.ATTR_ORDERED):
setattr(self, attr, args[i])
def as_str(self):
string = ''
for attr in self.ATTR_ORDERED:
value = getattr(self, attr)
string += attr + "={} ".format(value)
return string
class Log(object):
def __init__(self):
self.log_entries = []
def append(self, log_entry):
assert isinstance(log_entry, LogEntry)
self.log_entries.append(log_entry)
def as_vector(self, attr):
result = np.zeros((len(self.log_entries),))
for i, entry in enumerate(self.log_entries):
result[i] = getattr(entry, attr)
return result
class StructureLearning(GraphManipulator):
def __init__(self, graph, inference, parameter_learning, structure_update):
super(StructureLearning, self).__init__(graph)
assert isinstance(inference, BeliefPropagation)
assert isinstance(parameter_learning, ParameterLearning)
assert isinstance(structure_update, StructureUpdate)
self._register_properties()
self._inference = inference
self._parameter_learning = parameter_learning
self._structure_update = structure_update
self.lklhd_mindiff = 1.e-6
self.count_max = 999999
self.log = Log()
self.pl_recursive = True
def __do_logging(self, count, lklhd_parameters, lklhd_structure, lklhd_last):
lklhd_diff = lklhd_parameters - lklhd_last
lklhd_diff_structure = lklhd_structure - lklhd_last
lklhd_diff_parameters = lklhd_parameters - lklhd_structure
log_entry = LogEntry(count, self._graph.get_num_roots(), lklhd_parameters, lklhd_diff, lklhd_diff_structure,
lklhd_diff_parameters, self._graph.number_of_nodes())
self._print(log_entry.as_str())
self.log.append(log_entry)
@profile
def run(self):
pl = self._parameter_learning
su = self._structure_update
lklhd_last = pl.run()
self._print('lklhd={}'.format(lklhd_last))
count = 1
continue_condition = count-1 < self.count_max
while continue_condition:
lklhd_structure, id_roots = su.run()
if lklhd_structure is not None:
# only run the parameter learning on the changed subtrees
pl.run(id_roots=id_roots, recursive=self.pl_recursive)
lklhd_parameters = self._inference.get_lklhd_all()
lklhd_diff = lklhd_parameters - lklhd_last
condition_lklhd = lklhd_diff > self.lklhd_mindiff
condition_count = count < self.count_max
continue_condition = condition_count & condition_lklhd
self.__do_logging(count, lklhd_parameters, lklhd_structure, lklhd_last)
lklhd_last = lklhd_parameters
count += 1
if not condition_lklhd:
self._print('Terminate because lklhd_diff <= mindiff.')
if not condition_count:
self._print('Terminate because count_max is reached.')
else:
continue_condition = False
self._print('No more possible structure updates.')
# flush stdout
# (for some reason otherwise no stdout is written during this stage, when streams are redirected to a
# network file)
sys.stdout.flush()
# final parameter optimization
self._print('Do final recursive parameter optimization.')
# change the parameter learning variables:
parameters_new = {'restart_recursive':True,
'lklhd_mindiff':1e-5,
'count_max':1000,
'print_every':100,
'print_em':True}
parameters_original = get_and_set_attr(pl, parameters_new)
lklhd_structure = lklhd_last
lklhd_parameters = pl.run()
self.__do_logging(count, lklhd_parameters, lklhd_structure, lklhd_last)
# restore old parameters:
get_and_set_attr(pl, parameters_original)
return lklhd_last
class LatentTree(ObjectRoot):
def __init__(self, structure_update=StructureUpdateCVPR2015):
super(LatentTree, self).__init__()
# init LT objects
self.graph = Graph()
self.data = Data(self.graph)
self.inference = BeliefPropagation(self.graph)
self.parameter_update = ParameterUpdate(self.graph, self.inference)
self.parameter_learning = ParameterLearning(self.graph, self.data, self.inference, self.parameter_update)
self.structure_update = structure_update(self.graph, self.data, self.inference, self.parameter_learning)
self.structure_learning = StructureLearning(self.graph, self.inference, self.parameter_learning, self.structure_update)
@profile
def set_structure_update(self, structure_update, attr_dict=None):
self.structure_update = structure_update(self.graph, self.data, self.inference, self.parameter_learning)
self.structure_learning._structure_update = self.structure_update
if attr_dict is not None:
for key, val in attr_dict.items():
setattr(self.structure_update, key, val)
@profile
def training(self, X, K, clear_properties=True):
# training
start_time = time.time()
id_nodes = self.graph.add_nodes(K) # create the observed nodes
self.data.insert_samples(id_nodes, X) # insert the data
self.structure_update.add_gaussian_parents() # add gaussian single parents
self.data.run() # init distributions random or from data
self.lklhd = self.structure_learning.run() # learn the structure
elapsed_time = time.time() - start_time
self._print("elapsed training time: {:.1f} min".format(elapsed_time / 60))
if clear_properties:
# clean up
self.clear_properties()
@profile
def testing(self, X_o, id_o, id_u, clear_properties=True):
# testing
self.data.insert_samples(id_o, X_o)
lklhd = self.inference.run()
X_u = self.inference.extract_samples(id_u)
if clear_properties:
# clean up
self.clear_properties()
return X_u, lklhd
def clear_properties(self):
self.data.clear_properties()
self.inference.clear_properties()
self.parameter_update.clear_properties()
self.parameter_learning.clear_properties()
self.structure_update.clear_properties()
self.structure_learning.clear_properties() | bsd-3-clause |
jmetzen/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 168 | 2088 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
heplesser/nest-simulator | extras/ConnPlotter/colormaps.py | 6 | 6919 | # -*- coding: utf-8 -*-
#
# colormaps.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
Colormaps for ConnPlotter.
Provides the following functions and colormaps:
- make_colormap: based on color specification, create colormap
running from from white to fully saturated color
- redblue: from fully saturated red to white to fully saturated blue
- bluered: from fully saturated blue to white to fully saturated red
For all colormaps, "bad" values (NaN) are mapped to white.
Provides also ZeroCenterNorm, mapping negative values to 0..0.5,
positive to 0.5..1.
"""
# ----------------------------------------------------------------------------
import matplotlib.colors as mc
import matplotlib.cbook as cbook
import numpy as np
__all__ = ['ZeroCenterNorm', 'make_colormap', 'redblue', 'bluered',
'bad_color']
# ----------------------------------------------------------------------------
bad_color = (1.0, 1.0, 0.9)
# ----------------------------------------------------------------------------
class ZeroCenterNorm(mc.Normalize):
"""
Normalize so that value 0 is always at 0.5.
Code from matplotlib.colors.Normalize.
Copyright (c) 2002-2009 John D. Hunter; All Rights Reserved
http://matplotlib.sourceforge.net/users/license.html
"""
# ------------------------------------------------------------------------
def __call__(self, value, clip=None):
"""
Normalize given values to [0,1].
Returns data in same form as passed in.
value can be scalar or array.
"""
if clip is not None and clip is not False:
assert (False) # clip not supported
if cbook.iterable(value):
vtype = 'array'
val = np.ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = np.ma.array([value]).astype(np.float)
self.autoscale_None(val)
self.vmin = min(0, self.vmin)
self.vmax = max(0, self.vmax)
# imshow expects masked arrays
# fill entire array with 0.5
result = np.ma.array(0.5 * np.ma.asarray(np.ones(np.shape(val))),
dtype=np.float, mask=val.mask)
# change values != 0
result[val < 0] = 0.5 * (self.vmin - val[val < 0]) / self.vmin
result[val > 0] = 0.5 + 0.5 * val[val > 0] / self.vmax
if vtype == 'scalar':
result = result[0]
return result
# ------------------------------------------------------------------------
def inverse(self, value):
"""
Invert color map. Required by colorbar().
"""
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = np.asarray(value)
res = np.zeros(np.shape(val))
res[val < 0.5] = vmin - 2 * vmin * val[val < 0.5]
res[val > 0.5] = 2 * (val[val > 0.5] - 0.5) * vmax
return res
else:
if value == 0.5:
return 0
elif value < 0.5:
return vmin - 2 * vmin * value # vmin < 0
else:
return 2 * (value - 0.5) * vmax
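# Usage sketch: with data spanning [-1, 2], ZeroCenterNorm maps -1 -> 0.0,
# 0 -> 0.5 and 2 -> 1.0, keeping a diverging colormap centred on zero, e.g.
#   plt.imshow(data, cmap=bluered, norm=ZeroCenterNorm())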
# ----------------------------------------------------------------------------
def make_colormap(color):
"""
Create LinearSegmentedColormap ranging from white to the given color.
Color can be given in any legal color format. Bad color is set to bad_color (a very light yellow).
"""
try:
r, g, b = mc.colorConverter.to_rgb(color)
except Exception:
raise ValueError('Illegal color specification: %s' % color.__repr__())
cm = mc.LinearSegmentedColormap(color.__str__(),
{'red': [(0.0, 1.0, 1.0),
(1.0, r, r)],
'green': [(0.0, 1.0, 1.0),
(1.0, g, g)],
'blue': [(0.0, 1.0, 1.0),
(1.0, b, b)]})
cm.set_bad(color=bad_color) # light yellow
return cm
# ----------------------------------------------------------------------------
redblue = mc.LinearSegmentedColormap('redblue',
{'red': [(0.0, 0.0, 1.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)]})
redblue.set_bad(color=bad_color)
# ----------------------------------------------------------------------------
bluered = mc.LinearSegmentedColormap('bluered',
{'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 1.0, 1.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)]})
bluered.set_bad(color=bad_color)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
# this should be proper unit tests
n1 = ZeroCenterNorm()
if (n1([-1, -0.5, 0.0, 0.5, 1.0]).data == np.array(
[0, 0.25, 0.5, 0.75, 1.0])).all():
print("n1 ok")
else:
print("n1 failed.")
n2 = ZeroCenterNorm(-1, 2)
if (n2([-1, -0.5, 0.0, 1.0, 2.0]).data == np.array(
[0, 0.25, 0.5, 0.75, 1.0])).all():
print("n2 ok")
else:
print("n2 failed.")
| gpl-2.0 |
alexeyche/ego | tests/testfuncs.py | 1 | 3182 | #!/usr/bin/env python
import numpy as np
import random
import egopy as ego
from egopy import Cov, Mean, Lik, Inf, Model, Acq
from math import pi, cos, sin, pow
from matplotlib import pyplot as plt
ego.setDebugLogLevel()
def scale_to(x, minv=None, maxv=None, a=0.0, b=1.0):
if minv is None:
minv = min(x)
if maxv is None:
maxv = max(x)
return ((b-a)*(x - minv)/(maxv-minv)) + a
def plot_mins(X, Y, plot_spec):
minY = min(Y)
minYIds = np.where(Y == minY)[0]
plt.plot(X[minYIds], [minY]*minYIds.shape[0], plot_spec)
seed = 9
if not seed is None:
rng = np.random.RandomState(seed)
else:
rng = np.random.RandomState()
def lhs_sample(n, rng):
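# one-dimensional Latin hypercube sample: one point drawn uniformly from each
# of the n equal-width bins of (0, 1], returned in random order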
return (rng.permutation(range(1,n+1)) - rng.random_sample(n))/float(n)
def rosen(x):
a = 1
b = 100
return ((a - x[0])**2 + b * (x[1]- x[0]*x[0])**2)/100.0
def branin(x):
"""
% Min = 0.1239 0.8183
% Min = 0.5428 0.1517 => 0.3979
% Min = 0.9617 0.1650
"""
a = x[0] * 15 - 5;
b = x[1] * 15;
return ((b-(5.1/(4*pi**2))*a**2+5*a/pi-6)**2+10*(1-1/(8*pi))*cos(a)+10)
def periodic1d(x):
"""
Opt: 0.23719
"""
return (x-0.3)*(x-0.3) + sin(20*x)*0.2
def sq1d(x):
return (0.5-x)**2
D, fopt = 1, sq1d
cov = Cov("cSqExpISO", D, [np.log(1.0), np.log(1.0)])
mean = Mean("mConst", D, [1.0])
lik = Lik("lGauss", D, [np.log(0.01)])
inf = Inf("iExact")
acq = Acq("aEI", D, [0.0])
model = Model(mean, cov, lik, inf, acq)
init_size = 5
X = np.zeros((init_size, D))
for di in range(D):
X[:, di] = lhs_sample(init_size, rng)
Y = np.asarray([ fopt(x) for x in X ])
#np.savetxt("/var/tmp/testfuncs.csv", np.hstack((X, np.asarray([Y]).T)), delimiter=',')
np.savetxt("/var/tmp/testfuncs.csv", np.hstack((X, Y)), delimiter=',')
model.setData(X, Y)
#model.optimizeHyp()
#gridSize = pow(1000, 1.0/D)
gridSize = 1000
#gridSize = 100
grid = list()
for di in range(D):
grid.append(np.linspace(0.0, 1.0, gridSize))
if D>1:
points = np.vstack(np.meshgrid(*grid)).reshape(len(grid), -1).T
else:
points = grid[0]
Ygrid = np.asarray([ fopt(x) for x in points ])
preds = model.getPrediction(points)
Ymean = np.asarray([ y.getMean() for y in preds ])
Ysd = np.asarray([ y.getSd() for y in preds ])
ev, dev = acq.evaluateCriteria(points)
ev = scale_to(ev)
ev = ev.reshape(len(ev))
if D == 1:
plt.figure(1)
plt.plot(points, Ymean, '-', color='green', linewidth=2.0)
plt.plot(points, Ygrid, '-', color='blue')
plt.fill_between(points, Ymean-Ysd, Ymean+Ysd, facecolor='green', interpolate=True, alpha=0.2)
plt.plot(X, Y, 'bp')
plt.plot(points, ev, '-', color='red')
plot_mins(points, ev, 'rd')
plot_mins(points, Ymean, 'bd')
else:
ev = ev.reshape((gridSize, gridSize))
Ymean = Ymean.reshape((gridSize, gridSize))
Ysd = Ysd.reshape((gridSize, gridSize))
Ygrid = Ygrid.reshape((gridSize, gridSize))
plt.figure(1)
plt.contourf(grid[0], grid[1], Ygrid, alpha=0.5)
plt.contour(grid[0], grid[1], Ymean, linestyles='dashed')
plt.figure(2)
plt.contourf(grid[0], grid[1], ev)
#ego.optimizeModel(model, "CG", {"MaxEval": 100})
#model.optimize(fopt)
#X, Y = model.getData()
| gpl-2.0 |
drandykass/fatiando | gallery/gravmag/euler_moving_window.py | 7 | 3054 | """
.. _gallery_euler_mw:
Euler deconvolution with a moving window
----------------------------------------
Euler deconvolution attempts to estimate the coordinates of simple (idealized)
sources from the input potential field data. There is a strong assumption that
the sources have simple geometries, like spheres, vertical pipes, vertical
planes, etc. So it wouldn't be much of a surprise if the solutions aren't great
when sources are complex.
Let's test the Euler deconvolution using a moving window scheme, a very common
approach used in all industry software. This is implemented in
:class:`fatiando.gravmag.euler.EulerDeconvMW`.
"""
from __future__ import print_function
from fatiando.gravmag import sphere, transform, euler
from fatiando import gridder, utils, mesher
import matplotlib.pyplot as plt
# Make some synthetic magnetic data to test our Euler deconvolution.
# The regional field
inc, dec = -45, 0
# Make a model of two spheres magnetized by induction only
model = [
mesher.Sphere(x=-1000, y=-1000, z=1500, radius=1000,
props={'magnetization': utils.ang2vec(2, inc, dec)}),
mesher.Sphere(x=1000, y=1500, z=1000, radius=1000,
props={'magnetization': utils.ang2vec(1, inc, dec)})]
print("Centers of the model spheres:")
print(model[0].center)
print(model[1].center)
# Generate some magnetic data from the model
shape = (100, 100)
area = [-5000, 5000, -5000, 5000]
x, y, z = gridder.regular(area, shape, z=-150)
data = sphere.tf(x, y, z, model, inc, dec)
# We also need the derivatives of our data
xderiv = transform.derivx(x, y, data, shape)
yderiv = transform.derivy(x, y, data, shape)
zderiv = transform.derivz(x, y, data, shape)
# Now we can run our Euler deconv solver on a moving window over the data.
# Each window will produce an estimated point for the source.
# We use a structural index of 3 to indicate that we think the sources are
# spheres.
# Run the Euler deconvolution on moving windows to produce a set of solutions
# by running the solver on 10 x 10 windows of size 1000 x 1000 m
solver = euler.EulerDeconvMW(x, y, z, data, xderiv, yderiv, zderiv,
structural_index=3, windows=(10, 10),
size=(1000, 1000))
# Use the fit() method to obtain the estimates
solver.fit()
# The estimated positions are stored as a list of [x, y, z] coordinates
# (actually a 2D numpy array)
print('Kept Euler solutions after the moving window scheme:')
print(solver.estimate_)
# Plot the solutions on top of the magnetic data. Remember that the true depths
# of the centers of these sources are 1500 m and 1000 m.
plt.figure(figsize=(6, 5))
plt.title('Euler deconvolution with a moving window')
plt.contourf(y.reshape(shape), x.reshape(shape), data.reshape(shape), 30,
cmap="RdBu_r")
plt.scatter(solver.estimate_[:, 1], solver.estimate_[:, 0],
s=50, c=solver.estimate_[:, 2], cmap='cubehelix')
plt.colorbar(pad=0).set_label('Depth (m)')
plt.xlim(area[2:])
plt.ylim(area[:2])
plt.tight_layout()
plt.show()
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_gtkcairo.py | 21 | 2348 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print('backend_gtkcairo.%s()' % fn_name())
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTKCairo(figure)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.gc.ctx = pixmap.cairo_create()
else:
def set_pixmap (self, pixmap):
self.gc.ctx = cairo.gtk.gdk_cairo_create (pixmap)
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2GTKCairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
FigureCanvas = FigureCanvasGTKCairo
FigureManager = FigureManagerGTKCairo
| gpl-3.0 |
FRESNA/atlite | atlite/gis.py | 1 | 23075 | # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2020 The Atlite Authors
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""
Functions for Geographic Information System.
"""
import numpy as np
import pandas as pd
import xarray as xr
import scipy as sp
import scipy.sparse
import geopandas as gpd
import rasterio as rio
import rasterio.warp
import multiprocessing as mp
from collections import OrderedDict
from pathlib import Path
from warnings import warn
from pyproj import CRS, Transformer
from shapely.ops import transform
from rasterio.warp import reproject, transform_bounds
from rasterio.mask import mask
from rasterio.features import geometry_mask
from scipy.ndimage.morphology import binary_dilation as dilation
from numpy import isin, empty
from shapely.strtree import STRtree
from tqdm import tqdm
import logging
logger = logging.getLogger(__name__)
def get_coords(x, y, time, dx=0.25, dy=0.25, dt='h', **kwargs):
"""
    Create a cutout coordinate system on the basis of slices and step sizes.
Parameters
----------
x : slice
Numerical slices with lower and upper bound of the x dimension.
y : slice
Numerical slices with lower and upper bound of the y dimension.
time : slice
Slice with strings with lower and upper bound of the time dimension.
dx : float, optional
Step size of the x coordinate. The default is 0.25.
dy : float, optional
Step size of the y coordinate. The default is 0.25.
dt : str, optional
Frequency of the time coordinate. The default is 'h'. Valid are all
pandas offset aliases.
Returns
-------
ds : xarray.Dataset
Dataset with x, y and time variables, representing the whole coordinate
system.
"""
x = slice(*sorted([x.start, x.stop]))
y = slice(*sorted([y.start, y.stop]))
ds = xr.Dataset({'x': np.round(np.arange(-180, 180, dx), 9),
'y': np.round(np.arange(-90, 90, dy), 9),
'time': pd.date_range(start="1979", end="now", freq=dt)})
ds = ds.assign_coords(lon=ds.coords['x'], lat=ds.coords['y'])
ds = ds.sel(x=x, y=y, time=time)
return ds
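# Minimal usage sketch for get_coords; the bounds, resolution and one-week time
# window below are arbitrary example values, not defaults of the library.
def _sketch_get_coords():
    return get_coords(x=slice(-10, 10), y=slice(35, 60),
                      time=slice("2013-01-01", "2013-01-07"), dx=0.5, dy=0.5)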
def spdiag(v):
"""Create a sparse diagonal matrix from a 1-dimensional array."""
N = len(v)
inds = np.arange(N + 1, dtype=np.int32)
return sp.sparse.csr_matrix((v, inds[:-1], inds), (N, N))
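# Small check sketch for spdiag: the CSR construction above is equivalent to
# placing the vector on the main diagonal (the values here are arbitrary).
def _sketch_spdiag():
    D = spdiag(np.array([1.0, 2.0, 3.0]))
    assert (D.toarray() == np.diag([1.0, 2.0, 3.0])).all()
    return D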
def reproject_shapes(shapes, crs1, crs2):
"""Project a collection of shapes from one crs to another."""
transformer = Transformer.from_crs(crs1, crs2)
def _reproject_shape(shape):
return transform(transformer.transform, shape)
if isinstance(shapes, pd.Series):
return shapes.map(_reproject_shape)
elif isinstance(shapes, dict):
return OrderedDict((k, _reproject_shape(v)) for k, v in shapes.items())
else:
return list(map(_reproject_shape, shapes))
def reproject(shapes, p1, p2):
"""
Project a collection of shapes from one crs to another.
Deprecated since version 0.2.
"""
warn("reproject has been renamed to reproject_shapes", DeprecationWarning)
return reproject_shapes(shapes, p1, p2)
reproject.__doc__ = reproject_shapes.__doc__
def compute_indicatormatrix(orig, dest, orig_crs=4326, dest_crs=4326):
"""
Compute the indicatormatrix.
The indicatormatrix I[i,j] is a sparse representation of the ratio
of the area in orig[j] lying in dest[i], where orig and dest are
collections of polygons, i.e.
    A value of I[i,j] = 1 indicates that the shape orig[j] is fully
    contained in shape dest[i].
Note that the polygons must be in the same crs.
Parameters
----------
orig : Collection of shapely polygons
dest : Collection of shapely polygons
Returns
-------
I : sp.sparse.lil_matrix
Indicatormatrix
"""
orig = orig.geometry if isinstance(orig, gpd.GeoDataFrame) else orig
dest = dest.geometry if isinstance(dest, gpd.GeoDataFrame) else dest
dest = reproject_shapes(dest, dest_crs, orig_crs)
indicator = sp.sparse.lil_matrix((len(dest), len(orig)), dtype=float)
tree = STRtree(orig)
idx = dict((id(o), i) for i, o in enumerate(orig))
for i, d in enumerate(dest):
for o in tree.query(d):
if o.intersects(d):
j = idx[id(o)]
area = d.intersection(o).area
indicator[i, j] = area / o.area
return indicator
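# Minimal sketch of the area-ratio convention used above. The two one-cell
# grids are illustrative only; `box` is shapely's rectangle constructor.
def _sketch_indicatormatrix():
    from shapely.geometry import box
    orig = [box(0, 0, 1, 1)]        # one unit cell
    dest = [box(0.5, 0, 1.5, 1)]    # the same cell shifted by half its width
    I = compute_indicatormatrix(orig, dest)
    # half of orig[0] lies inside dest[0], hence I[0, 0] == 0.5
    return I.toarray()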
class ExclusionContainer:
"""Container for exclusion objects and meta data."""
def __init__(self, crs=3035, res=100):
"""Initialize a container for excluded areas.
Parameters
----------
crs : rasterio.CRS/proj.CRS/EPSG, optional
Base crs of the raster collection. All rasters and geometries
diverging from this crs will be converted to it.
The default is 3035.
res : float, optional
Resolution of the base raster. All diverging rasters will be
resampled using the gdal Resampling method 'nearest'.
The default is 100.
"""
self.rasters = []
self.geometries = []
self.crs = crs
self.res = res
def add_raster(self, raster, codes=None, buffer=0, invert=False, nodata=255,
allow_no_overlap=False, crs=None):
"""
Register a raster to the ExclusionContainer.
Parameters
----------
raster : str/rasterio.DatasetReader
Raster or path to raster which to exclude.
codes : int/list/function, optional
Codes in the raster which to exclude. Can be a callable function
            which takes the mask (np.array) as argument and performs an
            elementwise condition (must not change the shape). The default is 1.
buffer : int, optional
Buffer around the excluded areas in units of ExclusionContainer.crs.
Use this to create a buffer around the excluded/included area.
The default is 0.
invert : bool, optional
Whether to exclude (False) or include (True) the specified areas
of the raster. The default is False.
allow_no_overlap:
Allow that a raster and a shape (for which the raster will be used as
a mask) do not overlap. In this case an array with only `nodata` is
returned.
crs : rasterio.CRS/EPSG
CRS of the raster. Specify this if the raster has invalid crs.
"""
d = dict(raster=raster, codes=codes, buffer=buffer, invert=invert,
nodata=nodata, allow_no_overlap=allow_no_overlap, crs=crs)
self.rasters.append(d)
def add_geometry(self, geometry, buffer=0, invert=False):
"""
Register a collection of geometries to the ExclusionContainer.
Parameters
----------
geometry : str/path/geopandas.GeoDataFrame
Path to geometries or geometries which to exclude.
buffer : float, optional
Buffer around the excluded areas in units of ExclusionContainer.crs.
The default is 0.
invert : bool, optional
Whether to exclude (False) or include (True) the specified areas
of the geometries. The default is False.
"""
d = dict(geometry=geometry, buffer=buffer, invert=invert)
self.geometries.append(d)
def open_files(self):
"""Open rasters and load geometries."""
for d in self.rasters:
raster = d['raster']
if isinstance(raster, (str, Path)):
raster = rio.open(raster)
else:
assert isinstance(raster, rio.DatasetReader)
if (not raster.crs.is_valid if raster.crs is not None else True):
if d['crs']:
raster._crs = CRS(d['crs'])
else:
raise ValueError(f'CRS of {raster} is invalid, please '
'provide it.')
d['raster'] = raster
for d in self.geometries:
geometry = d['geometry']
if isinstance(geometry, (str, Path)):
geometry = gpd.read_file(geometry)
if isinstance(geometry, gpd.GeoDataFrame):
geometry = geometry.geometry
assert isinstance(geometry, gpd.GeoSeries)
assert geometry.crs is not None
geometry = geometry.to_crs(self.crs)
if d.get('buffer', 0) and not d.get('_buffered', False):
geometry = geometry.buffer(d['buffer'])
d['_buffered'] = True
d['geometry'] = geometry
@property
def all_closed(self):
"""Check whether all files in the raster container are closed."""
return all(isinstance(d['raster'], (str, Path)) for d in self.rasters)
@property
def all_open(self):
"""Check whether all files in the raster container are open."""
return all(isinstance(d['raster'], rio.DatasetReader) for d in self.rasters)
def __repr__(self):
return (f"Exclusion Container"
f"\n registered rasters: {len(self.rasters)} "
f"\n registered geometry collections: {len(self.geometries)}"
f"\n CRS: {self.crs} - Resolution: {self.res}")
def padded_transform_and_shape(bounds, res):
"""
Get the (transform, shape) tuple of a raster with resolution `res` and
bounds `bounds`.
"""
left, bottom = [(b // res)* res for b in bounds[:2]]
right, top = [(b // res + 1) * res for b in bounds[2:]]
shape = int((top - bottom) / res), int((right - left) / res)
return rio.Affine(res, 0, left, 0, -res, top), shape
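# Small worked example of the snapping convention above (numbers arbitrary):
# bounds (3, 2, 14, 9) at res=5 snap outwards to the window (0, 0, 15, 10),
# i.e. 2 rows x 3 columns with the origin in the top-left corner.
def _sketch_padded_transform():
    transform, shape = padded_transform_and_shape((3, 2, 14, 9), res=5)
    assert shape == (2, 3)
    assert transform == rio.Affine(5, 0, 0, 0, -5, 10)
    return transform, shape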
def projected_mask(raster, geom, transform=None, shape=None, crs=None,
allow_no_overlap=False, **kwargs):
"""Load a mask and optionally project it to target resolution and shape."""
nodata = kwargs.get('nodata', 255)
kwargs.setdefault('indexes', 1)
if geom.crs != raster.crs:
geom = geom.to_crs(raster.crs)
if allow_no_overlap:
try:
masked, transform_ = mask(raster, geom, crop=True, **kwargs)
except ValueError:
res = raster.res[0]
transform_, shape = padded_transform_and_shape(geom.total_bounds, res)
masked = np.full(shape, nodata)
else:
masked, transform_ = mask(raster, geom, crop=True, **kwargs)
if transform is None or (transform_ == transform and shape == masked.shape):
return masked, transform_
assert shape is not None and crs is not None
return rio.warp.reproject(masked, empty(shape), src_crs=raster.crs,
dst_crs=crs, src_transform=transform_,
dst_transform=transform, dst_nodata=nodata)
def pad_extent(src, src_transform, dst_transform, src_crs, dst_crs, **kwargs):
"""
Pad the extent of `src` by an equivalent of one cell of the target raster.
This ensures that the array is large enough to not be treated as nodata in
all cells of the destination raster. If src.ndim > 2, the function expects
the last two dimensions to be y,x.
Additional keyword arguments are used in `np.pad()`.
"""
if src.size == 0:
return src, src_transform
left, top, right, bottom = *(src_transform*(0,0)), *(src_transform*(1,1))
covered = transform_bounds(src_crs, dst_crs, left, bottom, right, top)
covered_res = min(abs(covered[2] - covered[0]), abs(covered[3] - covered[1]))
pad = int(dst_transform[0] // covered_res * 1.1)
kwargs.setdefault('mode', 'constant')
if src.ndim == 2:
return rio.pad(src, src_transform, pad, **kwargs)
npad = ((0,0),) * (src.ndim - 2) + ((pad, pad), (pad, pad))
padded = np.pad(src, npad, **kwargs)
transform = list(src_transform)
transform[2] -= pad * transform[0]
transform[5] -= pad * transform[4]
return padded, rio.Affine(*transform[:6])
def shape_availability(geometry, excluder):
"""
Compute the eligible area in one or more geometries.
Parameters
----------
geometry : geopandas.Series
Geometry of which the eligible area is computed. If the series contains
        more than one geometry, the eligible area of the combined geometries is
computed.
excluder : atlite.gis.ExclusionContainer
Container of all meta data or objects which to exclude, i.e.
rasters and geometries.
Returns
-------
masked : np.array
        Mask with eligible raster cells indicated by 1 and excluded cells by 0.
    transform : rasterio.Affine
Affine transform of the mask.
"""
exclusions = []
if not excluder.all_open:
excluder.open_files()
assert geometry.crs == excluder.crs
bounds = rio.features.bounds(geometry)
transform, shape = padded_transform_and_shape(bounds, res=excluder.res)
masked = geometry_mask(geometry, shape, transform).astype(int)
exclusions.append(masked)
    # For the following: 0 is eligible, 1 is excluded
raster = None
for d in excluder.rasters:
# allow reusing preloaded raster with different post-processing
if raster != d['raster']:
raster = d['raster']
kwargs_keys = ['allow_no_overlap', 'nodata']
kwargs = {k: v for k, v in d.items() if k in kwargs_keys}
masked, transform = projected_mask(d['raster'], geometry, transform,
shape, excluder.crs, **kwargs)
if d['codes']:
if callable(d['codes']):
masked_ = d['codes'](masked)
else:
masked_ = isin(masked, d['codes'])
else:
masked_ = masked
if d['invert']:
masked_ = ~(masked_).astype(bool)
if d['buffer']:
iterations = int(d['buffer'] / excluder.res) + 1
masked_ = dilation(masked_, iterations=iterations).astype(int)
exclusions.append(masked_.astype(int))
for d in excluder.geometries:
masked = ~geometry_mask(d['geometry'], shape, transform,
invert=d['invert'])
exclusions.append(masked.astype(int))
return (sum(exclusions) == 0).astype(float), transform
def shape_availability_reprojected(geometry, excluder, dst_transform, dst_crs,
dst_shape):
"""
Compute and reproject the eligible area of one or more geometries.
The function executes `shape_availability` and reprojects the calculated
mask onto a new raster defined by (dst_transform, dst_crs, dst_shape).
    Before reprojecting, the function pads the mask so that all non-nodata data
    points are projected into full cells of the target raster. This ensures that
    all data within the mask are projected correctly (a GDAL-inherent 'problem').
    Parameters
    ----------
geometry : geopandas.Series
Geometry in which the eligible area is computed. If the series contains
        more than one geometry, the eligible area of the combined geometries is
computed.
excluder : atlite.gis.ExclusionContainer
Container of all meta data or objects which to exclude, i.e.
rasters and geometries.
dst_transform : rasterio.Affine
Transform of the target raster.
dst_crs : rasterio.CRS/proj.CRS
CRS of the target raster.
dst_shape : tuple
        Shape of the target raster.
    Returns
    -------
    masked : np.array
Average share of available area per grid cell. 0 indicates excluded,
1 is fully included.
transform : rasterio.Affine
Affine transform of the mask.
"""
masked, transform = shape_availability(geometry, excluder)
masked, transform = pad_extent(masked, transform, dst_transform,
excluder.crs, dst_crs)
return rio.warp.reproject(masked, empty(dst_shape), resampling=5,
src_transform=transform,
dst_transform=dst_transform,
src_crs=excluder.crs, dst_crs=dst_crs,)
def _init_process(shapes_, excluder_, dst_transform_, dst_crs_, dst_shapes_):
global shapes, excluder, dst_transform, dst_crs, dst_shapes
shapes, excluder = shapes_, excluder_
dst_transform, dst_crs, dst_shapes = dst_transform_, dst_crs_, dst_shapes_
def _process_func(i):
args = (excluder, dst_transform, dst_crs, dst_shapes)
return shape_availability_reprojected(shapes.loc[[i]], *args)[0]
def compute_availabilitymatrix(cutout, shapes, excluder, nprocesses=None,
disable_progressbar=False):
"""
Compute the eligible share within cutout cells in the overlap with shapes.
For parallel calculation (nprocesses not None) the excluder must not be
    initialized and all raster references must be strings. Otherwise processes
    collide when reading from one common rasterio.DatasetReader.
Parameters
----------
cutout : atlite.Cutout
Cutout which the availability matrix is aligned to.
shapes : geopandas.Series/geopandas.DataFrame
Geometries for which the availabilities are calculated.
excluder : atlite.gis.ExclusionContainer
Container of all meta data or objects which to exclude, i.e.
rasters and geometries.
nprocesses : int, optional
        Number of processes to use for calculating the matrix. The
        parallelization can heavily boost the calculation speed. The default is None.
disable_progressbar: bool, optional
Disable the progressbar if nprocesses is not None. Then the `map`
function instead of the `imap` function is used for the multiprocessing
pool. This speeds up the calculation.
Returns
-------
availabilities : xr.DataArray
DataArray of shape (|shapes|, |y|, |x|) containing all the eligible
share of cutout cell (x,y) in the overlap with shape i.
Notes
-----
The rasterio (or GDAL) average downsampling returns different results
dependent on how the target raster (the cutout raster) is spanned.
Either it is spanned from the top left going downwards,
e.g. Affine(0.25, 0, 0, 0, -0.25, 50), or starting in the
lower left corner and going up, e.g. Affine(0.25, 0, 0, 0, 0.25, 50).
Here we stick to the top down version which is why we use
`cutout.transform_r` and flipping the y-axis in the end.
"""
availability = []
shapes = shapes.geometry if isinstance(shapes, gpd.GeoDataFrame) else shapes
shapes = shapes.to_crs(excluder.crs)
args = (excluder, cutout.transform_r, cutout.crs, cutout.shape)
tqdm_kwargs = dict(ascii=False, unit=' gridcells', total=len(shapes),
desc='Compute availability matrix')
if nprocesses is None:
for i in tqdm(shapes.index, **tqdm_kwargs):
_ = shape_availability_reprojected(shapes.loc[[i]], *args)[0]
availability.append(_)
else:
assert excluder.all_closed, ('For parallelization all raster files '
'in excluder must be closed')
kwargs = {'initializer': _init_process,
'initargs': (shapes, *args),
'maxtasksperchild': 20,
'processes': nprocesses}
with mp.get_context('spawn').Pool(**kwargs) as pool:
if disable_progressbar:
availability = list(pool.map(_process_func, shapes.index))
else:
availability = list(tqdm(pool.imap(_process_func, shapes.index),
**tqdm_kwargs))
availability = np.stack(availability)[:, ::-1] # flip axis, see Notes
coords=[(shapes.index), ('y', cutout.data.y.data), ('x', cutout.data.x.data)]
return xr.DataArray(availability, coords=coords)
def maybe_swap_spatial_dims(ds, namex='x', namey='y'):
"""Swap order of spatial dimensions according to atlite concention."""
swaps = {}
lx, rx = ds.indexes[namex][[0, -1]]
ly, uy = ds.indexes[namey][[0, -1]]
if lx > rx:
swaps[namex] = slice(None, None, -1)
if uy < ly:
swaps[namey] = slice(None, None, -1)
return ds.isel(**swaps) if swaps else ds
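# Tiny sketch of the ordering convention enforced above: a descending x
# coordinate is flipped so that both axes increase (coordinate values are
# arbitrary).
def _sketch_swap_spatial_dims():
    ds = xr.Dataset(coords={'x': [2, 1, 0], 'y': [0, 1, 2]})
    flipped = maybe_swap_spatial_dims(ds)
    assert list(flipped.indexes['x']) == [0, 1, 2]
    return flipped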
def _as_transform(x, y):
lx, rx = x[[0, -1]]
ly, uy = y[[0, -1]]
dx = float(rx - lx) / float(len(x) - 1)
dy = float(uy - ly) / float(len(y) - 1)
return rio.Affine(dx, 0, lx - dx/2, 0, dy, ly - dy/2)
def regrid(ds, dimx, dimy, **kwargs):
"""
Interpolate Dataset or DataArray `ds` to a new grid, using rasterio's
reproject facility.
See also: https://mapbox.github.io/rasterio/topics/resampling.html
Parameters
----------
ds : xr.Dataset|xr.DataArray
N-dim data on a spatial grid
dimx : pd.Index
New x-coordinates in destination crs.
dimx.name MUST refer to x-coord of ds.
dimy : pd.Index
New y-coordinates in destination crs.
dimy.name MUST refer to y-coord of ds.
**kwargs :
Arguments passed to rio.wrap.reproject; of note:
- resampling is one of gis.Resampling.{average,cubic,bilinear,nearest}
- src_crs, dst_crs define the different crs (default: EPSG 4326, ie latlong)
"""
namex = dimx.name
namey = dimy.name
ds = maybe_swap_spatial_dims(ds, namex, namey)
src_transform = _as_transform(ds.indexes[namex], ds.indexes[namey])
dst_transform = _as_transform(dimx, dimy)
dst_shape = len(dimy), len(dimx)
kwargs.update(dst_transform=dst_transform)
kwargs.setdefault("src_crs", CRS.from_epsg(4326))
kwargs.setdefault("dst_crs", CRS.from_epsg(4326))
def _reproject(src, **kwargs):
shape = src.shape[:-2] + dst_shape
src, trans = pad_extent(src, src_transform, dst_transform,
kwargs['src_crs'], kwargs['dst_crs'], mode='edge')
return rio.warp.reproject(src, empty(shape), src_transform=trans, **kwargs)[0]
data_vars = ds.data_vars.values() if isinstance(ds, xr.Dataset) else (ds,)
dtypes = {da.dtype for da in data_vars}
assert len(dtypes) == 1, \
"regrid can only reproject datasets with homogeneous dtype"
return (xr.apply_ufunc(_reproject,
ds,
input_core_dims=[[namey, namex]],
output_core_dims=[['yout', 'xout']],
output_dtypes=[dtypes.pop()],
dask_gufunc_kwargs =
dict(output_sizes={'yout': dst_shape[0],
'xout': dst_shape[1]}),
dask='parallelized',
kwargs=kwargs)
.rename({'yout': namey, 'xout': namex})
.assign_coords(**{namey: (namey, dimy.data, ds.coords[namey].attrs),
namex: (namex, dimx.data, ds.coords[namex].attrs)})
.assign_attrs(**ds.attrs))
| gpl-3.0 |
annayqho/TheCannon | code/lamost/xcalib_5labels/cross_validation.py | 1 | 6860 | """
Divide 9952 training objects into eight groups,
and do an 8-fold leave-1/8 out.
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '/home/annaho/aida41040/annaho/TheCannon/TheCannon')
sys.path.insert(0, '/home/annaho/aida41040/annaho/TheCannon')
from TheCannon import dataset
from TheCannon import model
from TheCannon import lamost
from astropy.table import Table
from matplotlib.colors import LogNorm
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
import os
import pyfits
direc_ref = "/Users/annaho/TheCannon/data/lamost_paper"
def group_data():
""" Load the reference data, and assign each object
a random integer from 0 to 7. Save the IDs. """
tr_obj = np.load("%s/ref_id.npz" %direc_ref)['arr_0']
groups = np.random.randint(0, 8, size=len(tr_obj))
np.savez("ref_groups.npz", groups)
def train(ds, ii):
""" Run the training step, given a dataset object. """
print("Loading model")
m = model.CannonModel(2)
print("Training...")
m.fit(ds)
np.savez("./ex%s_coeffs.npz" %ii, m.coeffs)
np.savez("./ex%s_scatters.npz" %ii, m.scatters)
np.savez("./ex%s_chisqs.npz" %ii, m.chisqs)
np.savez("./ex%s_pivots.npz" %ii, m.pivots)
fig = m.diagnostics_leading_coeffs(ds)
plt.savefig("ex%s_leading_coeffs.png" %ii)
# m.diagnostics_leading_coeffs_triangle(ds)
# m.diagnostics_plot_chisq(ds)
return m
def load_model(ii):
print("Loading model")
m = model.CannonModel(2)
m.coeffs = np.load("./ex%s_coeffs.npz" %ii)['arr_0']
m.scatters = np.load("./ex%s_scatters.npz" %ii)['arr_0']
m.chisqs = np.load("./ex%s_chisqs.npz" %ii)['arr_0']
m.pivots = np.load("./ex%s_pivots.npz" %ii)['arr_0']
return m
def test(ds, m, group):
nguesses = 7
nobj = len(ds.test_ID)
nlabels = len(m.pivots)
choose = np.random.randint(0,nobj,size=nguesses)
tr_label = ds.tr_label
print("nlab")
print(nlabels)
print("nobj")
print(nobj)
print("tr label shape")
print(tr_label.shape)
print("m pivots shape")
print(m.pivots.shape)
starting_guesses = tr_label[choose]-m.pivots
labels = np.zeros((nguesses, nobj, nlabels))
chisq = np.zeros((nguesses, nobj))
errs = np.zeros(labels.shape)
for ii,guess in enumerate(starting_guesses):
a,b,c = test_step_iteration(ds,m,starting_guesses[ii])
labels[ii,:] = a
chisq[ii,:] = b
errs[ii,:] = c
np.savez("ex%s_labels_all_starting_vals.npz" %group, labels)
np.savez("ex%s_chisq_all_starting_vals.npz" %group, chisq)
np.savez("ex%s_errs_all_starting_vals.npz" %group, errs)
choose = np.argmin(chisq, axis=0)
best_chisq = np.min(chisq, axis=0)
best_labels = np.zeros(tr_label.shape)
best_errs = np.zeros(best_labels.shape)
for jj,val in enumerate(choose):
best_labels[jj,:] = labels[:,jj,:][val]
best_errs[jj,:] = errs[:,jj,:][val]
np.savez("./ex%s_cannon_label_vals.npz" %group, best_labels)
np.savez("./ex%s_cannon_label_chisq.npz" %group, best_chisq)
np.savez("./ex%s_cannon_label_errs.npz" %group, best_errs)
ds.test_label_vals = best_labels
ds.diagnostics_survey_labels()
ds.diagnostics_1to1(figname = "ex%s_1to1_test_label" %group)
def infer_spectra(ds, m):
m.infer_spectra(ds)
return m.model_spectra
def test_step_iteration(ds, m, starting_guess):
errs, chisq = m.infer_labels(ds, starting_guess)
return ds.test_label_vals, chisq, errs
def load_dataset(ii):
("loading data")
groups = np.load("ref_groups.npz")['arr_0']
ref_label = np.load("%s/ref_label.npz" %direc_ref)['arr_0']
ref_id = np.load("%s/ref_id.npz" %direc_ref)['arr_0']
ref_flux = np.load("%s/ref_flux.npz" %direc_ref)['arr_0']
ref_ivar = np.load("%s/ref_ivar.npz" %direc_ref)['arr_0']
wl = np.load("%s/wl.npz" %direc_ref)['arr_0']
print("Leaving out group %s" %ii)
train_on = groups != ii
test_on = groups == ii
tr_label = ref_label[train_on]
tr_id = ref_id[train_on]
tr_flux = ref_flux[train_on]
tr_ivar = ref_ivar[train_on]
print("Training on %s objects" %len(tr_id))
test_label = ref_label[test_on]
test_id = ref_id[test_on]
test_flux = ref_flux[test_on]
test_ivar = ref_ivar[test_on]
print("Testing on %s objects" %len(test_id))
print("Loading dataset...")
ds = dataset.Dataset(
wl, tr_id, tr_flux, tr_ivar, tr_label,
test_id, test_flux, test_ivar)
    ds.test_label_vals = np.load("./ex%s_cannon_label_vals.npz" %ii)['arr_0']
print(ds.test_label_vals.shape)
ds.set_label_names(
['T_{eff}', '\log g', '[M/H]', '[\\alpha/Fe]', 'AKWISE'])
fig = ds.diagnostics_SNR()
plt.savefig("ex%s_SNR.png" %ii)
fig = ds.diagnostics_ref_labels()
plt.savefig("ex%s_ref_label_triangle.png" %ii)
np.savez("ex%s_tr_snr.npz" %ii, ds.tr_SNR)
return ds
def xvalidate():
""" Train a model, leaving out a group corresponding
to a random integer from 0 to 7, e.g. leave out 0.
Test on the remaining 1/8 of the sample. """
print("Loading data")
groups = np.load("ref_groups.npz")['arr_0']
ref_label = np.load("%s/ref_label.npz" %direc_ref)['arr_0']
ref_id = np.load("%s/ref_id.npz" %direc_ref)['arr_0']
ref_flux = np.load("%s/ref_flux.npz" %direc_ref)['arr_0']
ref_ivar = np.load("%s/ref_ivar.npz" %direc_ref)['arr_0']
wl = np.load("%s/wl.npz" %direc_ref)['arr_0']
num_models = 8
for ii in np.arange(num_models):
print("Leaving out group %s" %ii)
train_on = groups != ii
test_on = groups == ii
tr_label = ref_label[train_on]
tr_id = ref_id[train_on]
tr_flux = ref_flux[train_on]
tr_ivar = ref_ivar[train_on]
print("Training on %s objects" %len(tr_id))
test_label = ref_label[test_on]
test_id = ref_id[test_on]
test_flux = ref_flux[test_on]
test_ivar = ref_ivar[test_on]
print("Testing on %s objects" %len(test_id))
print("Loading dataset...")
ds = dataset.Dataset(
wl, tr_id, tr_flux, tr_ivar, tr_label,
test_id, test_flux, test_ivar)
ds.set_label_names(
['T_{eff}', '\log g', '[M/H]', '[\\alpha/Fe]', 'AKWISE'])
fig = ds.diagnostics_SNR()
plt.savefig("ex%s_SNR.png" %ii)
fig = ds.diagnostics_ref_labels()
plt.savefig("ex%s_ref_label_triangle.png" %ii)
np.savez("ex%s_tr_snr.npz" %ii, ds.tr_SNR)
# train a model
m = train(ds, ii)
# test step
ds.tr_label = test_label # to compare the results
test(ds, m, ii)
if __name__=="__main__":
group = 2
ds = load_dataset(group)
m = load_model(group)
model_spectra = infer_spectra(ds, m)
| mit |
Lawrence-Liu/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
asanfilippo7/osf.io | scripts/analytics/utils.py | 2 | 2982 | # -*- coding: utf-8 -*-
import os
import unicodecsv as csv
from bson import ObjectId
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import requests
from website import util
from website import settings as website_settings
def oid_to_datetime(oid):
return ObjectId(oid).generation_time
def mkdirp(path):
try:
os.makedirs(path)
except OSError:
pass
def plot_dates(dates, *args, **kwargs):
    """Plot date histogram."""
    if dates is None or len(dates) == 0:
        return -1
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(
[mdates.date2num(each) for each in dates],
*args, **kwargs
)
fig.autofmt_xdate()
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
return fig
def make_csv(fp, rows, headers=None):
writer = csv.writer(fp)
if headers:
writer.writerow(headers)
writer.writerows(rows)
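# Minimal usage sketch for make_csv; the path and rows are placeholders.
# unicodecsv writes bytes, so the file is opened in binary mode.
def make_csv_example(path='/tmp/example.csv'):
    with open(path, 'wb') as fp:
        make_csv(fp, rows=[(1, 'a'), (2, 'b')], headers=['id', 'name'])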
def send_file(name, content_type, stream, node, user, create=True, path='/'):
"""Upload file to OSF using waterbutler v1 api
:param str name: The name of the requested file
:param str content_type: Content-Type
:param StringIO stream: file-like stream to be uploaded
:param Node node: Project Node
:param User user: User whose cookie will be used
:param Bool create: Create or update file
:param str path: Waterbutler V1 path of the requested file
"""
if not node:
return
node_id = node._id
if not user:
return
cookies = {website_settings.COOKIE_NAME:user.get_or_create_cookie()}
# create a new folder
if stream is None:
upload_url = util.waterbutler_api_url_for(node_id, 'osfstorage', path, kind='folder', name=name)
print('create folder: url={}'.format(upload_url))
resp = requests.put(
upload_url,
headers={'Content-Type': content_type},
cookies=cookies,
)
if resp.status_code != 201:
resp.raise_for_status()
return resp
# create or update a file
stream.seek(0)
if create:
upload_url = util.waterbutler_api_url_for(node_id, 'osfstorage', path, kind='file', name=name)
print('create file: url={}'.format(upload_url))
else:
path = '/{}'.format(name)
upload_url = util.waterbutler_api_url_for(node_id, 'osfstorage', path, kind='file')
print('update file: url={}'.format(upload_url))
resp = requests.put(
upload_url,
data=stream,
headers={'Content-Type': content_type},
cookies=cookies,
)
if resp.status_code not in [200, 201, 503, 409]:
resp.raise_for_status()
if resp.status_code == 503:
pass # forward 503 error back to the caller
elif resp.status_code == 409:
print('I/O Warning: cannot create new file/folder that already exists.') # this should never appear
return resp
| apache-2.0 |
NvanAdrichem/networkx | networkx/convert_matrix.py | 2 | 33323 | """Functions to convert NetworkX graphs to and from numpy/scipy matrices.
The preferred way of converting data to a NetworkX graph is through the
graph constuctor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D = nx.DiGraph(a)
or equivalently
>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2014 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['from_numpy_matrix', 'to_numpy_matrix',
'from_pandas_dataframe', 'to_pandas_dataframe',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
def to_pandas_dataframe(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None, optional
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float, optional
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
df : Pandas DataFrame
Graph adjacency matrix
Notes
-----
The DataFrame entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the 'multigraph_weight' parameter. The default is to
sum the weight attributes for each of the parallel edges.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Pandas DataFrame can be modified as follows:
>>> import pandas as pd
>>> import numpy as np
>>> G = nx.Graph([(1,1)])
>>> df = nx.to_pandas_dataframe(G, dtype=int)
>>> df
1
1 1
>>> df.values[np.diag_indices_from(df)] *= 2
>>> df
1
1 2
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_pandas_dataframe(G, nodelist=[0,1,2], dtype=int)
0 1 2
0 0 2 0
1 1 0 0
2 0 0 4
"""
import pandas as pd
M = to_numpy_matrix(G, nodelist=nodelist, dtype=dtype, order=order,
multigraph_weight=multigraph_weight, weight=weight,
nonedge=nonedge)
if nodelist is None:
nodelist = list(G)
return pd.DataFrame(data=M, index=nodelist, columns=nodelist)
def from_pandas_dataframe(df, source, target, edge_attr=None,
create_using=None):
"""Return a graph from Pandas DataFrame.
The Pandas DataFrame should contain at least two columns of node names and
zero or more columns of node attributes. Each row will be processed as one
edge instance.
Note: This function iterates over DataFrame.values, which is not
guaranteed to retain the data type across columns in the row. This is only
a problem if your row is entirely numeric and a mix of ints and floats. In
that case, all values will be returned as floats. See the
DataFrame.iterrows documentation for an example.
Parameters
----------
df : Pandas DataFrame
An edge list representation of a graph
source : str or int
        A valid column name (string or integer) for the source nodes (for the
        directed case).
    target : str or int
        A valid column name (string or integer) for the target nodes (for the
        directed case).
edge_attr : str or int, iterable, True
A valid column name (str or integer) or list of column names that will
be used to retrieve items from the row and add them to the graph as edge
attributes. If `True`, all of the remaining columns will be added.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
See Also
--------
to_pandas_dataframe
Examples
--------
Simple integer weights on edges:
>>> import pandas as pd
>>> import numpy as np
>>> r = np.random.RandomState(seed=5)
>>> ints = r.random_integers(1, 10, size=(3,2))
>>> a = ['A', 'B', 'C']
>>> b = ['D', 'A', 'E']
>>> df = pd.DataFrame(ints, columns=['weight', 'cost'])
>>> df[0] = a
>>> df['b'] = b
>>> df
weight cost 0 b
0 4 7 A D
1 7 1 B A
2 10 9 C E
>>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])
>>> G['E']['C']['weight']
10
>>> G['E']['C']['cost']
9
"""
g = _prep_create_using(create_using)
# Index of source and target
src_i = df.columns.get_loc(source)
tar_i = df.columns.get_loc(target)
if edge_attr:
# If all additional columns requested, build up a list of tuples
# [(name, index),...]
if edge_attr is True:
# Create a list of all columns indices, ignore nodes
edge_i = []
for i, col in enumerate(df.columns):
if col is not source and col is not target:
edge_i.append((col, i))
# If a list or tuple of name is requested
elif isinstance(edge_attr, (list, tuple)):
edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]
# If a string or int is passed
else:
edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),]
# Iteration on values returns the rows as Numpy arrays
for row in df.values:
g.add_edge(row[src_i], row[tar_i], attr_dict={i:row[j] for i, j in edge_i})
# If no column names are given, then just return the edges.
else:
for row in df.values:
g.add_edge(row[src_i], row[tar_i])
return g
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None optional (default = 'weight')
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float (default = 0.0)
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
M : NumPy matrix
Graph adjacency matrix
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the `multigraph_weight` parameter. The default is to
sum the weight attributes for each of the parallel edges.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Numpy matrix can be modified as follows:
>>> import numpy as np
>>> G = nx.Graph([(1, 1)])
>>> A = nx.to_numpy_matrix(G)
>>> A
matrix([[ 1.]])
>>> A.A[np.diag_indices_from(A)] *= 2
>>> A
matrix([[ 2.]])
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
import numpy as np
if nodelist is None:
nodelist = list(G)
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
# Initially, we start with an array of nans. Then we populate the matrix
# using data from the graph. Afterwards, any leftover nans will be
# converted to the value of `nonedge`. Note, we use nans initially,
# instead of zero, for two reasons:
#
# 1) It can be important to distinguish a real edge with the value 0
# from a nonedge with the value 0.
#
# 2) When working with multi(di)graphs, we must combine the values of all
# edges between any two nodes in some manner. This often takes the
# form of a sum, min, or max. Using the value 0 for a nonedge would
# have undesirable effects with min and max, but using nanmin and
# nanmax with initially nan values is not problematic at all.
#
# That said, there are still some drawbacks to this approach. Namely, if
# a real edge is nan, then that value is a) not distinguishable from
# nonedges and b) is ignored by the default combinator (nansum, nanmin,
# nanmax) functions used for multi(di)graphs. If this becomes an issue,
# an alternative approach is to use masked arrays. Initially, every
# element is masked and set to some `initial` value. As we populate the
# graph, elements are unmasked (automatically) when we combine the initial
# value with the values given by real edges. At the end, we convert all
# masked values to `nonedge`. Using masked arrays fully addresses reason 1,
# but for reason 2, we would still have the issue with min and max if the
# initial values were 0.0. Note: an initial value of +inf is appropriate
# for min, while an initial value of -inf is appropriate for max. When
# working with sum, an initial value of zero is appropriate. Ideally then,
# we'd want to allow users to specify both a value for nonedges and also
# an initial value. For multi(di)graphs, the choice of the initial value
# will, in general, depend on the combinator function---sensible defaults
# can be provided.
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
except:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges(data=True):
if (u in nodeset) and (v in nodeset):
i, j = index[u], index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight, M[i,j]])
if undirected:
M[j,i] = M[i,j]
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order) + np.nan
for u,nbrdict in G.adjacency():
for v,d in nbrdict.items():
try:
M[index[u],index[v]] = d.get(weight,1)
except KeyError:
# This occurs when there are fewer desired nodes than
# there are nodes in the graph: len(nodelist) < len(G)
pass
M[np.isnan(M)] = nonedge
M = np.asmatrix(M, dtype=dtype)
return M
def from_numpy_matrix(A, parallel_edges=False, create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is True, `create_using` is a multigraph, and `A` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is False, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the
entries of `A` are of type :class:`int`, then this function returns a
multigraph (of the same type as `create_using`) with parallel edges.
If `create_using` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1, 1], [2, 1]])
>>> G=nx.from_numpy_matrix(A)
If `create_using` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If `create_using` is a multigraph and the matrix has only integer entries
but `parallel_edges` is True, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> temp = nx.MultiGraph()
>>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
User defined compound data type on edges:
>>> import numpy
>>> dt = [('weight', float), ('cost', int)]
>>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)
>>> G = nx.from_numpy_matrix(A)
>>> list(G.edges())
[(0, 0)]
>>> G[0][0]['cost']
2
>>> G[0][0]['weight']
1.0
"""
# This should never fail if you have created a numpy matrix with numpy...
import numpy as np
kind_to_python_type={'f':float,
'i':int,
'u':int,
'b':bool,
'c':complex,
'S':str,
'V':'void'}
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
kind_to_python_type['U']=str
except ValueError: # Python 2.6+
kind_to_python_type['U']=unicode
G=_prep_create_using(create_using)
n,m=A.shape
if n!=m:
raise nx.NetworkXError("Adjacency matrix is not square.",
"nx,ny=%s"%(A.shape,))
dt=A.dtype
try:
python_type=kind_to_python_type[dt.kind]
except:
raise TypeError("Unknown numpy data type: %s"%dt)
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Get a list of all the entries in the matrix with nonzero entries. These
# coordinates will become the edges in the graph.
edges = zip(*(np.asarray(A).nonzero()))
# handle numpy constructed data type
    if python_type == 'void':
# Sort the fields by their offset, then by dtype, then by name.
fields = sorted((offset, dtype, name) for name, (dtype, offset) in
A.dtype.fields.items())
triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)
for (_, dtype, name), val in zip(fields, A[u, v])})
for u, v in edges)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
elif python_type is int and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))
for (u, v) in edges)
else: # basic data type
triples = ((u, v, dict(weight=python_type(A[u, v])))
for u, v in edges)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# `_generated_weighted_edges()` function are actually the row/column
# indices for the matrix `A`.
#
# Without this check, we run into a problem where each edge is added twice
# when `G.add_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_edges_from(triples)
return G
@not_implemented_for('multigraph')
def to_numpy_recarray(G, nodelist=None, dtype=None, order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
if dtype is None:
dtype = [('weight', float)]
import numpy as np
if nodelist is None:
nodelist = list(G)
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
weight='weight', format='csr'):
"""Return the graph adjacency matrix as a SciPy sparse matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [1]_ for details.
Returns
-------
M : SciPy sparse matrix
Graph adjacency matrix.
Notes
-----
The matrix entries are populated using the edge attribute held in
parameter weight. When an edge does not have that attribute, the
value of the entry is 1.
For multiple edges the matrix values are the sums of the edge weights.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Uses coo_matrix format. To convert to other formats specify the
format= keyword.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Scipy sparse matrix can be modified as follows:
>>> import scipy as sp
>>> G = nx.Graph([(1,1)])
>>> A = nx.to_scipy_sparse_matrix(G)
>>> print(A.todense())
[[1]]
>>> A.setdiag(A.diagonal()*2)
>>> print(A.todense())
[[2]]
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
[1 0 0]
[0 0 4]]
References
----------
.. [1] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
if nodelist is None:
nodelist = list(G)
nlen = len(nodelist)
if nlen == 0:
raise nx.NetworkXError("Graph has no nodes or edges")
if len(nodelist) != len(set(nodelist)):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
index = dict(zip(nodelist,range(nlen)))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data = zip(*((index[u],index[v],d.get(weight,1))
for u,v,d in G.edges(nodelist, data=True)
if u in index and v in index))
if G.is_directed():
M = sparse.coo_matrix((data,(row,col)),
shape=(nlen,nlen), dtype=dtype)
else:
# symmetrize matrix
d = data + data
r = row + col
c = col + row
# selfloop entries get double counted when symmetrizing
# so we subtract the data on the diagonal
selfloops = list(G.selfloop_edges(data=True))
if selfloops:
diag_index,diag_data = zip(*((index[u],-d.get(weight,1))
for u,v,d in selfloops
if u in index and v in index))
d += diag_data
r += diag_index
c += diag_index
M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def _csr_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Row** format to
an iterable of weighted edge triples.
"""
nrows = A.shape[0]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(nrows):
for j in range(indptr[i], indptr[i+1]):
yield i, indices[j], data[j]
def _csc_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Column** format to
an iterable of weighted edge triples.
"""
ncols = A.shape[1]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(ncols):
for j in range(indptr[i], indptr[i+1]):
yield indices[j], i, data[j]
def _coo_gen_triples(A):
"""Converts a SciPy sparse matrix in **Coordinate** format to an iterable
of weighted edge triples.
"""
row, col, data = A.row, A.col, A.data
return zip(row, col, data)
def _dok_gen_triples(A):
"""Converts a SciPy sparse matrix in **Dictionary of Keys** format to an
iterable of weighted edge triples.
"""
for (r, c), v in A.items():
yield r, c, v
def _generate_weighted_edges(A):
"""Returns an iterable over (u, v, w) triples, where u and v are adjacent
vertices and w is the weight of the edge joining u and v.
`A` is a SciPy sparse matrix (in any format).
"""
if A.format == 'csr':
return _csr_gen_triples(A)
if A.format == 'csc':
return _csc_gen_triples(A)
if A.format == 'dok':
return _dok_gen_triples(A)
# If A is in any other format (including COO), convert it to COO format.
return _coo_gen_triples(A.tocoo())
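# Illustrative sketch (not part of the original module): for a small CSR
# matrix such as
#
#     A = scipy.sparse.csr_matrix([[0, 1], [2, 0]])
#
# `_generate_weighted_edges(A)` yields one (row, column, value) triple per
# stored entry, here (0, 1, 1) and (1, 0, 2), which from_scipy_sparse_matrix
# below turns into weighted edges.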
def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,
edge_attribute='weight'):
"""Creates a new graph from an adjacency matrix given as a SciPy sparse
matrix.
Parameters
----------
A: scipy sparse matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is True, `create_using` is a multigraph, and `A` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is False, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the
entries of `A` are of type :class:`int`, then this function returns a
multigraph (of the same type as `create_using`) with parallel edges.
In this case, `edge_attribute` will be ignored.
If `create_using` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
Examples
--------
>>> import scipy.sparse
>>> A = scipy.sparse.eye(2,2,1)
>>> G = nx.from_scipy_sparse_matrix(A)
If `create_using` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If `create_using` is a multigraph and the matrix has only integer entries
but `parallel_edges` is True, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
... create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
"""
G = _prep_create_using(create_using)
n,m = A.shape
if n != m:
raise nx.NetworkXError(\
"Adjacency matrix is not square. nx,ny=%s"%(A.shape,))
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = _generate_weighted_edges(A)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# `_generated_weighted_edges()` function are actually the row/column
# indices for the matrix `A`.
#
# Without this check, we run into a problem where each edge is added twice
# when `G.add_weighted_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
| bsd-3-clause |
AtsushiSakai/PythonRobotics | PathPlanning/DepthFirstSearch/depth_first_search.py | 1 | 7598 | """
Depth-First grid planning
author: Erwin Lejeune (@spida_rwin)
See Wikipedia article (https://en.wikipedia.org/wiki/Depth-first_search)
"""
import math
import matplotlib.pyplot as plt
show_animation = True
class DepthFirstSearchPlanner:
def __init__(self, ox, oy, reso, rr):
"""
Initialize grid map for Depth-First planning
ox: x position list of Obstacles [m]
oy: y position list of Obstacles [m]
        reso: grid resolution [m]
        rr: robot radius [m]
"""
self.reso = reso
self.rr = rr
self.calc_obstacle_map(ox, oy)
self.motion = self.get_motion_model()
class Node:
def __init__(self, x, y, cost, parent_index, parent):
self.x = x # index of grid
self.y = y # index of grid
self.cost = cost
self.parent_index = parent_index
self.parent = parent
def __str__(self):
return str(self.x) + "," + str(self.y) + "," + str(
self.cost) + "," + str(self.parent_index)
def planning(self, sx, sy, gx, gy):
"""
Depth First search
input:
            sx: start x position [m]
            sy: start y position [m]
gx: goal x position [m]
gy: goal y position [m]
output:
rx: x position list of the final path
ry: y position list of the final path
"""
nstart = self.Node(self.calc_xyindex(sx, self.minx),
self.calc_xyindex(sy, self.miny), 0.0, -1, None)
ngoal = self.Node(self.calc_xyindex(gx, self.minx),
self.calc_xyindex(gy, self.miny), 0.0, -1, None)
open_set, closed_set = dict(), dict()
open_set[self.calc_grid_index(nstart)] = nstart
while 1:
if len(open_set) == 0:
print("Open set is empty..")
break
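            # Popping the most recently inserted key makes the open set behave
            # like a LIFO stack, which is what gives the search its depth-first
            # order.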
current = open_set.pop(list(open_set.keys())[-1])
c_id = self.calc_grid_index(current)
# show graph
if show_animation: # pragma: no cover
plt.plot(self.calc_grid_position(current.x, self.minx),
self.calc_grid_position(current.y, self.miny), "xc")
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event:
[exit(0) if event.key == 'escape'
else None])
plt.pause(0.01)
if current.x == ngoal.x and current.y == ngoal.y:
print("Find goal")
ngoal.parent_index = current.parent_index
ngoal.cost = current.cost
break
# expand_grid search grid based on motion model
for i, _ in enumerate(self.motion):
node = self.Node(current.x + self.motion[i][0],
current.y + self.motion[i][1],
current.cost + self.motion[i][2], c_id, None)
n_id = self.calc_grid_index(node)
# If the node is not safe, do nothing
if not self.verify_node(node):
continue
if n_id not in closed_set:
open_set[n_id] = node
closed_set[n_id] = node
node.parent = current
rx, ry = self.calc_final_path(ngoal, closed_set)
return rx, ry
def calc_final_path(self, ngoal, closedset):
# generate final course
rx, ry = [self.calc_grid_position(ngoal.x, self.minx)], [
self.calc_grid_position(ngoal.y, self.miny)]
n = closedset[ngoal.parent_index]
while n is not None:
rx.append(self.calc_grid_position(n.x, self.minx))
ry.append(self.calc_grid_position(n.y, self.miny))
n = n.parent
return rx, ry
def calc_grid_position(self, index, minp):
"""
calc grid position
:param index:
:param minp:
:return:
"""
pos = index * self.reso + minp
return pos
def calc_xyindex(self, position, min_pos):
return round((position - min_pos) / self.reso)
def calc_grid_index(self, node):
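        # Flatten the 2-D grid cell (x, y) into a single integer key
        # (row-major over y); the open/closed set dictionaries in planning()
        # use this key to identify nodes uniquely.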
return (node.y - self.miny) * self.xwidth + (node.x - self.minx)
def verify_node(self, node):
px = self.calc_grid_position(node.x, self.minx)
py = self.calc_grid_position(node.y, self.miny)
if px < self.minx:
return False
elif py < self.miny:
return False
elif px >= self.maxx:
return False
elif py >= self.maxy:
return False
# collision check
if self.obmap[node.x][node.y]:
return False
return True
def calc_obstacle_map(self, ox, oy):
self.minx = round(min(ox))
self.miny = round(min(oy))
self.maxx = round(max(ox))
self.maxy = round(max(oy))
print("min_x:", self.minx)
print("min_y:", self.miny)
print("max_x:", self.maxx)
print("max_y:", self.maxy)
self.xwidth = round((self.maxx - self.minx) / self.reso)
self.ywidth = round((self.maxy - self.miny) / self.reso)
print("x_width:", self.xwidth)
print("y_width:", self.ywidth)
# obstacle map generation
self.obmap = [[False for _ in range(self.ywidth)]
for _ in range(self.xwidth)]
for ix in range(self.xwidth):
x = self.calc_grid_position(ix, self.minx)
for iy in range(self.ywidth):
y = self.calc_grid_position(iy, self.miny)
for iox, ioy in zip(ox, oy):
d = math.hypot(iox - x, ioy - y)
if d <= self.rr:
self.obmap[ix][iy] = True
break
@staticmethod
def get_motion_model():
# dx, dy, cost
motion = [[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[-1, -1, math.sqrt(2)],
[-1, 1, math.sqrt(2)],
[1, -1, math.sqrt(2)],
[1, 1, math.sqrt(2)]]
return motion
def main():
print(__file__ + " start!!")
# start and goal position
sx = 10.0 # [m]
sy = 10.0 # [m]
gx = 50.0 # [m]
gy = 50.0 # [m]
grid_size = 2.0 # [m]
robot_radius = 1.0 # [m]
# set obstacle positions
ox, oy = [], []
for i in range(-10, 60):
ox.append(i)
oy.append(-10.0)
for i in range(-10, 60):
ox.append(60.0)
oy.append(i)
for i in range(-10, 61):
ox.append(i)
oy.append(60.0)
for i in range(-10, 61):
ox.append(-10.0)
oy.append(i)
for i in range(-10, 40):
ox.append(20.0)
oy.append(i)
for i in range(0, 40):
ox.append(40.0)
oy.append(60.0 - i)
if show_animation: # pragma: no cover
plt.plot(ox, oy, ".k")
plt.plot(sx, sy, "og")
plt.plot(gx, gy, "xb")
plt.grid(True)
plt.axis("equal")
dfs = DepthFirstSearchPlanner(ox, oy, grid_size, robot_radius)
rx, ry = dfs.planning(sx, sy, gx, gy)
if show_animation: # pragma: no cover
plt.plot(rx, ry, "-r")
plt.pause(0.01)
plt.show()
if __name__ == '__main__':
main()
| mit |
rheinheimer/HydraApp-WEAPImport | weap_template.py | 1 | 10766 | # import
import pandas as pd
from os.path import join
import xml.etree.ElementTree as ET
import os
import zipfile
from pypxlib import Table
from collections import OrderedDict
import shutil
'''
a shortcut to ET.SubElement(parent, child).text ( and *.text = text )
'''
def add_node(parent, child, text):
if type(text)==str:
ET.SubElement(parent, child).text = text
else:
ET.SubElement(parent, child)
# add resource attribute
def add_attribute(resources, resource_name, attr_dict):
for resource in resources:
if resource.find('name').text == resource_name:
attr = ET.SubElement(resource, 'attribute')
for attr_name, attr_text in attr_dict.items():
add_node(attr, attr_name, attr_text)
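# Illustrative sketch (not in the original script): these helpers are used
# further below roughly like
#
#     attr = ET.SubElement(resource, 'attribute')
#     add_node(attr, 'name', 'inflow')   # -> <attribute><name>inflow</name></attribute>
#     add_attribute(resources, 'reservoir', {'name': 'inflow', 'unit': '1e6 m^3'})
#
# where 'reservoir' and the attribute values are placeholders.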
'''
pretty print xml from http://effbot.org/zone/element-lib.htm#prettyprint
'''
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
'''
zip a directory - pilfered from the internet
'''
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
def create_xml_template(tpl_name):
# load features and variables from csv
features_all = pd.read_csv('./WEAP_default_features.csv')
variables_all = pd.read_csv('./WEAP_default_variables.csv')
var_features = variables_all.feature.unique()
cat_features = features_all.feature.unique()
# template name
tpl = ET.Element('template_definition')
#tree = ET.ElementTree(tpl)
ET.SubElement(tpl, 'template_name').text = tpl_name
# define link lines
width = 4
linkstyle = {}
linkstyle['river'] = ['solid', 'blue', width]
linkstyle['diversion'] = ['solid', 'orange', width]
linkstyle['transmission_link'] = ['solid', 'green', width]
linkstyle['return_flow'] = ['solid', 'red', width]
linkstyle['runoff_infiltration'] = ['dashed', 'blue', width]
# add layout
layout = ET.SubElement(tpl, 'layout')
item = ET.SubElement(layout, 'item')
add_node(item, 'name', 'grouping')
value = ET.SubElement(item, 'value')
add_node(value, 'name', tpl_name)
add_node(value, 'description', tpl_name)
categories = ET.SubElement(value, 'categories')
cats = features_all.category.unique()
for cat in cats:
category = ET.SubElement(categories, 'category')
for attr in ['name', 'description', 'displayname']:
add_node(category, attr, cat)
groups = ET.SubElement(category, 'groups')
features = features_all[features_all.category==cat]
for f in features.itertuples():
if f.feature not in var_features:
pass
group = ET.SubElement(groups, 'group')
add_node(group, 'name', f.feature)
add_node(group, 'description', f.description)
add_node(group, 'displayname', f.displayname)
add_node(group, 'image', 'images\\%s.png' % f.feature)
# add resources
resources = ET.SubElement(tpl, 'resources')
# add a blank NETWORK resource if no NETWORK variables exist
if 'NETWORK' not in categories:
resource = ET.SubElement(resources, 'resource')
add_node(resource, 'type', 'NETWORK')
add_node(resource, 'name', 'key_assumptions')
# add features and variables
#for feature in var_features:
for f in features_all.itertuples():
if f.feature=='catchment':
pass
if f.feature not in cat_features:
continue
# get resource category
category = features_all[features_all.feature==f.feature].category.iloc[0][:-1].upper()
# add the resource subelement
resource = ET.SubElement(resources, 'resource')
# add resource layout info
add_node(resource, 'type', category)
add_node(resource, 'name', f.feature)
layout = ET.SubElement(resource, 'layout')
item = ET.SubElement(layout, 'item')
add_node(item, 'name', 'image')
add_node(item, 'value', 'images\\'+f.feature+'.png')
if category == 'LINK':
for i, iname in enumerate(['symbol','colour','line_weight']):
item = ET.SubElement(layout, 'item')
add_node(item, 'name', iname)
add_node(item, 'value', str(linkstyle[f.feature][i]))
item = ET.SubElement(layout, 'item')
add_node(item, 'name', 'group')
add_node(item, 'value', f.feature)
# add variables
feature_variables = variables_all[variables_all.feature == f.feature]
for v in feature_variables.itertuples():
if v.variable_type=='Water Quality':
continue
attr = ET.SubElement(resource, 'attribute')
add_node(attr, 'name', v.variable_name.replace(' ', '_'))
add_node(attr, 'dimension', v.dimension)
add_node(attr, 'unit', v.hydra_unit)
add_node(attr, 'is_var', 'N')
add_node(attr, 'data_type', 'descriptor')
# add basic result variables - inflow/outflow
for v in ['inflow','outflow']:
attr = ET.SubElement(resource, 'attribute')
add_node(attr, 'name', v)
add_node(attr, 'dimension', 'Volume')
add_node(attr, 'unit', '1e6 m^3')
add_node(attr, 'is_var', 'Y')
add_node(attr, 'data_type', 'timeseries')
return tpl#, tree
def make_type_dict(weapdir):
typedefs = Table(join(weapdir, '_Dictionary', 'NodeTypes.DB'))
type_dict = {}
for t in typedefs:
type_dict[t.TypeID] = str(t.Name.lower()).replace(' ','_').replace('/','_')
return type_dict
'''
convert paradox db to pandas df
'''
def px_to_df(pxdb):
with Table(pxdb) as units:
fields = list(units.fields)
rows = [(row[fields[0]], [row[field] for field in fields[1:]]) for row in units]
df = pd.DataFrame.from_items(rows, orient='index', columns=fields[1:])
return df
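# Illustrative note (not in the original script): px_to_df returns a DataFrame
# indexed by the table's first field, so lookups such as
#
#     units_df = px_to_df(join(areadir, 'Units.DB'))
#     name = units_df.loc[some_id].Name
#
# mirror how it is used in add_custom_variables() below; `some_id` is a
# placeholder.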
def add_custom_variables(tpl, weapdir, area):
areadir = join(weapdir, area)
# lookup dataframes for...
# type:
type_df = px_to_df(pxdb = join(weapdir, '_Dictionary', 'NodeTypes.DB'))
# catagory:
category_df = px_to_df(pxdb = join(weapdir, '_Dictionary', 'Category.DB'))
# units:
units_df = px_to_df(pxdb = join(areadir, 'Units.DB'))
# weap-hydra units
weap_hydra_units_df = pd.read_csv('weap_hydra_units.csv', index_col=0)
resources = tpl.find('resources')
# read user variables database
with Table(file_path=join(areadir, 'UserVariables.db')) as uservariables:
# loop through all user variables and add them to the template
for v in uservariables:
attr_dict = {}
# feature name
if v.TypeID:
resource_name = str(type_df.loc[v.TypeID].Name).lower().replace(' ','_').replace('/', '_')
else:
category = category_df.loc[v.CategoryID].Name
if category == 'Treatment': resource_name = 'Wastewater_Treatment_Plant'
elif category == 'Water Use': resource_name = 'Demand_Site'
# need to add more categories if needed, perhaps from lookup table
# determine units
weap_unit_name = units_df.loc[-v.NumUnitFieldID].Name
hydra_unit_abbr = weap_hydra_units_df.loc[weap_unit_name].Hydra_abbr
# data type
if v.IsInteger:
v_data_type = 'scalar'
else:
v_data_type = 'timeseries'
# write the variable info to a dictionary
attr_dict = OrderedDict()
attr_dict['name'] = str(v.DisplayLabel).replace(' ','_')
#attr_dict['description'] = v.GridComment
attr_dict['dimension'] = 'Volume'
attr_dict['unit'] = hydra_unit_abbr
attr_dict['is_var'] = 'Y'
attr_dict['data_type'] = v_data_type
# write the variables to template, under resources
add_attribute(resources, resource_name, attr_dict)
def write_template_xml(tpl, tree, tpl_name):
# prettify
indent(tpl)
# write to file
fout = join(tpl_name, './template/template.xml')
tree.write(fout)
def create_template_zipfile(tpl_name):
# create the zipfile
zipf = zipfile.ZipFile(tpl_name + '.zip', 'w', zipfile.ZIP_DEFLATED)
zipd = tpl_name + '/template'
zipdir(zipd, zipf)
zipf.close()
def main(tpl_name, custom_area, weapdir, write_template=True, direct_import=True, outdir=None):
# check if input requirements are met
if write_template and outdir==None:
return
# create template xml
tpl = create_xml_template(tpl_name)
# update template from specific model
if custom_area:
add_custom_variables(tpl, weapdir, custom_area)
# create tree
tree = ET.ElementTree(tpl)
## 1. write template to xml file and create hydra-friendly zip file
if write_template:
# remove old template directory
tpl_path = join(outdir, tpl_name)
if os.path.exists(tpl_path):
shutil.rmtree(tpl_path)
# create new template directory
os.mkdir(tpl_path)
shutil.copytree(src='template', dst=join(tpl_path, 'template'))
# write template xml to file
write_template_xml(tpl, tree, tpl_name)
# create template zipfile for import to Hydra
create_template_zipfile(tpl_name)
## 2. import xml directly
if __name__ == '__main__':
weapdir = r'C:\Users\L03060467\Documents\WEAP Areas'
#custom_area = 'Weaping River Basin'
custom_area = None
if custom_area:
tpl_name = custom_area
else:
tpl_name = 'WEAP'
outdir = '.'
write_template = True
direct_import = False
main(tpl_name, custom_area, weapdir=weapdir, write_template=True, direct_import=False, outdir=outdir)
print('finished') | mit |
homeslike/OpticalTweezer | scripts/p0.8_at0.05/vCOMhistogram.py | 28 | 2448 | import math
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from subprocess import call
from scipy.stats import norm
# proc = call("ls *.dat",shell=True)
# datetime = "170123_2033_"
datetime = sys.argv[1]+"_"
gasTempDataIn = np.genfromtxt(datetime+"gasTempData.dat",usecols=0,skip_header=100)
gasTempDataOut = np.genfromtxt(datetime+"gasTempData.dat",usecols=1,skip_header=100)
vCOMData_x = np.genfromtxt(datetime+"vCOMData.dat",usecols=0,skip_header=100)
vCOMData_y = np.genfromtxt(datetime+"vCOMData.dat",usecols=1,skip_header=100)
vCOMData_z = np.genfromtxt(datetime+"vCOMData.dat",usecols=2,skip_header=100)
N = 32
vSqd = []
for i in range(0,len(vCOMData_x)):
    vSqd.append((vCOMData_x[i]*vCOMData_x[i]+vCOMData_y[i]*vCOMData_y[i]+vCOMData_z[i]*vCOMData_z[i])*0.5)
vSqdMean = np.mean(vSqd)
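# Assuming unit mass and k_B = 1, equipartition gives T_COM = (2/3)*<0.5*|v_COM|^2>,
# which is the "T_COM" value written to the statistics file below.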
histogram_x,bins_x = np.histogram(vCOMData_x,bins=100,normed=True)
histogram_y,bins_y = np.histogram(vCOMData_y,bins=100,normed=True)
histogram_z,bins_z = np.histogram(vCOMData_z,bins=100,normed=True)
inTemp = np.mean(gasTempDataIn)
outTemp = np.mean(gasTempDataOut)
statistics = open(datetime+"statistics.dat","w")
statistics.write("GasIn: " + str(inTemp)+"\n")
statistics.write("GasOut: " + str(outTemp)+"\n")
statistics.write("T_COM: " + str(2./3. * vSqdMean)+"\n")
statistics.write("Mu_x " + str(np.mean(vCOMData_x))+"\n")
statistics.write("Sigma_x: " + str(np.std(vCOMData_x))+"\n")
statistics.write("Mu_y " + str(np.mean(vCOMData_y))+"\n")
statistics.write("Sigma_y: " + str(np.std(vCOMData_y))+"\n")
statistics.write("Mu_z " + str(np.mean(vCOMData_z))+"\n")
statistics.write("Sigma_z: " + str(np.std(vCOMData_z))+"\n")
histogram_x_file = open(datetime+"histogram_vx.dat","w")
histogram_y_file = open(datetime+"histogram_vy.dat","w")
histogram_z_file = open(datetime+"histogram_vz.dat","w")
for i in range(0,len(histogram_x)):
histogram_x_file.write(str(bins_x[i]) + "\t" + str(histogram_x[i]) + "\n")
histogram_y_file.write(str(bins_y[i]) + "\t" + str(histogram_y[i]) + "\n")
histogram_z_file.write(str(bins_z[i]) + "\t" + str(histogram_z[i]) + "\n")
# plt.figure(1)
# plt.hist(vCOMData_x,bins=100)
# plt.figure(2)
# plt.hist(vCOMData_y,bins=100)
# plt.figure(3)
# plt.hist(vCOMData_z,bins=100)
# plt.show()
# plt.figure(1)
# plt.plot(vSqd)
# plt.plot((0,700),(vSqdMean,vSqdMean))
# plt.figure(2)
# plt.hist(vCOMData_x,bins=100,normed=True)
# plt.plot(x,gasInPDF)
# plt.show()
| mit |
Lucas-Armand/genetic-algorithm | dev/9ºSemana/testes of speed.py | 5 | 3255 | # -*- coding: utf-8 -*-
import os
import csv
import random
import numpy as np
import timeit
import time as Time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from itertools import product, combinations
class Block:
def __init__(self,point,a,b,c,weight,btype):
self.p=point
self.a=a
self.b=b
self.c=c
self.w=weight
self.t=btype
def csv_read(name): # read method: converts a CSV file into a vector (list of rows)
CSV=open(name,'r')
dados=CSV.read()
dados=dados.replace(',','.')
dados=dados.replace(';',',')
CSV.close()
CSV=open("temp.csv",'w')
CSV.write(dados)
CSV.close()
CSV=open("temp.csv",'r')
dados=csv.reader(CSV)
v=[]
for i in dados:
I=[]
for j in i:
try:
j = float(j)
except:
pass
I.append(j)
v.append(I)
CSV.close()
os.remove("temp.csv")
return (v)
def defineGeometry(name):
vect = csv_read(name)
blockNumber ={}
for i in vect:
a = i[1]
b = i[2]
c = i[3]
point = [i[4],i[5],i[6]]
weight = i[7]
btype = i[-1]
block = Block(point,a,b,c,weight,btype)
blockNumber[i[0]] = block
return blockNumber
bNumb=defineGeometry('GeometriaNavio.csv')
# Define vicinity
#deck
vicinity={1:[2]}
for i in range(2,16):
vicinity[i] = [i-1,i+1]
vicinity[16] = [15]
#side
vicinity[17] = [18,19]
vicinity[18] = [17,20]
for i in range(19,31):
v = i-1 if i%2==0 else i+1
vicinity[i] = [v,i-2,i+2]
vicinity[31] = [29,32]
vicinity[32] = [30,31]
#bott
vicinity[33] = [34,35]
vicinity[34] = [33,36]
for i in range(35,63):
v = i-1 if i%2==0 else i+1
vicinity[i] = [v,i-2,i+2]
vicinity[63] = [61,64]
vicinity[64] = [63,62]
#coff
vicinity[65] = [66]
for i in range(66,70):
vicinity[i] = [i-1,i+1]
vicinity[70] = [69]
alfa = 10
beta = 1
built = []
time = 0
append = built.append
def order(x): return vicinity[x]
def time(bNumb,vicinity,chromo):
t_time = Time.time()
alfa = 1
built = []
time = 0
append = built.append
def time_vector(x,y):
for i in y:
if i in built:
time = alfa
break
try:time
except: time = 10*alfa
append(x)
return time
vic = [vicinity[x] for x in chromo]
time = sum((time_vector(x,y) for x,y in zip(chromo,vic)))
return time
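# Illustrative note (not in the original script): time() charges each block in
# the build order a cost of `alfa` if at least one of its neighbours (from the
# `vicinity` table) is already built, and 10*alfa otherwise, so chromosomes
# that place adjacent blocks consecutively score lower total assembly times.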
chromo = [44, 39, 56, 47, 49, 37, 42, 46, 51, 58, 60, 62, 52, 41, 35, 33, 50, 61, 54, 34, 59, 43, 48, 45, 55, 53, 38, 57, 64, 67, 68, 63, 40, 36, 21, 66, 22, 6, 20, 65, 18, 5, 17, 69, 28, 27, 70, 29, 1, 12, 30, 13, 14, 26, 31, 24, 19, 2, 3, 4, 25, 11, 32, 10, 15, 16, 9, 23, 7, 8]
import cProfile
cProfile.run('time(bNumb,vicinity,chromo)')
##
##print timeit.timeit(setup='from __main__ import chromo;'+
## 'from __main__ import bNumb;'+
## 'from __main__ import time;'+
## 'from __main__ import vicinity '
## ,stmt='time(bNumb,vicinity,chromo)')
#print t.timeit(number = 1000000)
| gpl-3.0 |
ljbade/libswiftnav | python/docs/extensions/ipython_directive.py | 31 | 27191 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to supply the code as
pure Python input by giving the directive the argument ``python``. The output
looks like an interactive IPython session.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST.
    The default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
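# Illustrative sketch (assuming the default 'In [%d]:' / 'Out[%d]:' prompts):
# feeding block_parser the two-line part
#
#     In [1]: x = 1
#     Out[1]: 1
#
# yields roughly [(INPUT, (None, 'x = 1', '')), (OUTPUT, '1')].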
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
        This runs it line by line in the InteractiveShell, prepends prompts
        as needed, captures stderr and stdout, and then returns the content
        as a list as if it were IPython code.
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
| lgpl-3.0 |
alexeyum/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
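# noise_coef scales the noise so that the amplitude ratio
# ||y|| / ||noise_coef * noise|| equals exp(snr / 20).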
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
AlexRobson/scikit-learn | sklearn/tests/test_multiclass.py | 72 | 24581 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
        # Estimator with only a decision function; predict_proba fails in the
        # current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
    # Estimator with only a decision function; predict_proba fails in the
    # current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
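    # Rounding recovers the votes because the confidence term folded into each
    # decision value is kept below 0.5 in absolute value.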
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs and thus 3 distinct
        # binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    # test that ties can be won by labels other than just the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
cainiaocome/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
manashmndl/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
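# Optional extra (not part of the original example): report how much of the total
# variance the three plotted PCA directions capture, using only objects defined above.
pca = PCA(n_components=3).fit(iris.data)
print("Explained variance ratio of the first three PCA components: %s"
      % str(pca.explained_variance_ratio_))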
| bsd-3-clause |
rupakc/Kaggle-Compendium | Random Acts of Pizza/PizzaCombinedModel.py | 1 | 8053 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 31 23:03:00 2015
Defines the data model for Random Acts of Pizza
@author: Rupak Chakraborty
"""
import pandas as pd
import numpy as np
import math
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import cross_validation
from sklearn import metrics
import ClassificationUtils
import time
import nltk
from nltk.tokenize import word_tokenize
filename = "Random Acts of Pizza/train.json"
class_map = {True:1,False:0}
jsonData = pd.read_json(filename)
jsonData = jsonData.iloc[np.random.permutation(len(jsonData))]
requester_pizza_status = np.array(map(lambda x: class_map[x],jsonData["requester_received_pizza"]))
class_labels = requester_pizza_status
data = np.zeros((len(jsonData),18))
# Function to extract POS tag counts from a given text
def getNounAdjVerbs(text):
words = word_tokenize(text)
pos_tags = nltk.pos_tag(words)
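    # Count Penn Treebank tags: NN* for nouns, JJ* for adjectives, VB* for verbs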
nouns = 0
verbs = 0
adj = 0
for token in pos_tags:
k = token[1]
if k == "NN" or k == "NNP" or k == "NNS" or k == "NNPS":
nouns = nouns + 1
elif k == "JJ" or k == "JJR" or k == "JJS":
adj = adj + 1
elif k == "VB" or k == "VBD" or k == "VBG" or k == "VBN" or k == "VBP" or k == "VBZ":
verbs = verbs + 1
return nouns,adj,verbs
# Extract Text features
request_text__data = list(jsonData["request_text_edit_aware"])
request_text_title_data = list (jsonData["request_title"])
clean_text_data = list([])
clean_title_data = list([])
print "Starting feature loading and cleaning ..."
start = time.time()
for i in range(len(request_text__data)):
title_string = ClassificationUtils.textCleaningPipeline(request_text_title_data[i])
text_string = ClassificationUtils.textCleaningPipeline(request_text__data[i])
clean_text_data.append(text_string)
clean_title_data.append(title_string)
end = time.time()
print "Time taken to load and clean text features : ", end-start
# Extract whole features
number_of_downvotes_of_request_at_retrieval = np.array(jsonData["number_of_downvotes_of_request_at_retrieval"],dtype=float)
number_of_upvotes_of_request_at_retrieval = np.array(jsonData["number_of_upvotes_of_request_at_retrieval"],dtype=float)
request_number_of_comments_at_retrieval = np.array(jsonData["request_number_of_comments_at_retrieval"],dtype=float)
requester_number_of_subreddits_at_request = np.array(jsonData["requester_number_of_subreddits_at_request"],dtype=float)
whole_features = [number_of_downvotes_of_request_at_retrieval,number_of_upvotes_of_request_at_retrieval,\
request_number_of_comments_at_retrieval,requester_number_of_subreddits_at_request]
# Extract pairwise different features
requester_account_age_in_days_at_request = np.array(jsonData["requester_account_age_in_days_at_request"],dtype=float)
requester_account_age_in_days_at_retrieval = np.array(jsonData["requester_account_age_in_days_at_retrieval"],dtype=float)
requester_days_since_first_post_on_raop_at_request = np.array(jsonData["requester_days_since_first_post_on_raop_at_request"],dtype=float)
requester_days_since_first_post_on_raop_at_retrieval = np.array(jsonData["requester_days_since_first_post_on_raop_at_retrieval"],dtype=float)
requester_number_of_comments_at_request = np.array(jsonData["requester_number_of_comments_at_request"],dtype=float)
requester_number_of_comments_at_retrieval = np.array(jsonData["requester_number_of_comments_at_retrieval"],dtype=float)
requester_number_of_comments_in_raop_at_request = np.array(jsonData["requester_number_of_comments_in_raop_at_request"],dtype=float)
requester_number_of_comments_in_raop_at_retrieval = np.array(jsonData["requester_number_of_comments_in_raop_at_retrieval"],dtype=float)
requester_number_of_posts_at_request = np.array(jsonData["requester_number_of_posts_at_request"],dtype=float)
requester_number_of_posts_at_retrieval = np.array(jsonData["requester_number_of_posts_at_retrieval"],dtype=float)
requester_number_of_posts_on_raop_at_request = np.array(jsonData["requester_number_of_posts_on_raop_at_request"],dtype=float)
requester_number_of_posts_on_raop_at_retrieval = np.array(jsonData["requester_number_of_posts_on_raop_at_retrieval"],dtype=float)
requester_upvotes_minus_downvotes_at_request = np.array(jsonData["requester_upvotes_minus_downvotes_at_request"],dtype=float)
requester_upvotes_minus_downvotes_at_retrieval = np.array(jsonData["requester_upvotes_minus_downvotes_at_retrieval"],dtype=float)
requester_upvotes_plus_downvotes_at_request = np.array(jsonData["requester_upvotes_plus_downvotes_at_request"],dtype=float)
requester_upvotes_plus_downvotes_at_retrieval = np.array(jsonData["requester_upvotes_plus_downvotes_at_retrieval"],dtype=float)
request_features = [requester_account_age_in_days_at_request,requester_days_since_first_post_on_raop_at_request\
,requester_number_of_comments_at_request,requester_number_of_comments_in_raop_at_request,requester_number_of_posts_at_request\
,requester_number_of_posts_on_raop_at_request,requester_upvotes_minus_downvotes_at_request,requester_upvotes_plus_downvotes_at_request]
retrieval_features = [requester_account_age_in_days_at_retrieval,requester_days_since_first_post_on_raop_at_retrieval\
,requester_number_of_comments_at_retrieval,requester_number_of_comments_in_raop_at_retrieval,requester_number_of_posts_at_retrieval\
,requester_number_of_posts_on_raop_at_retrieval,requester_upvotes_minus_downvotes_at_retrieval,requester_upvotes_plus_downvotes_at_retrieval]
#Extracting and organizing the data in a numpy array
print "Starting feature organization and POS tagging"
start = time.time()
for i in range(len(data)):
feature_row = []
for whole in whole_features:
feature_row.append(whole[i])
for index,retrieval in enumerate(retrieval_features):
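        # Percent change from request time to retrieval time, with +1.0 added to
        # numerator and denominator to avoid division by zero on zero-valued features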
difference = retrieval[i] - request_features[index][i]
difference = ((difference + 1.0)/(request_features[index][i] + 1.0))*100.0
if math.isinf(difference) or math.isnan(difference):
difference = 1.0
feature_row.append(difference)
text_pos_tags = getNounAdjVerbs(clean_text_data[i])
title_post_tags = getNounAdjVerbs(clean_title_data[i])
total_pos_tag_count = text_pos_tags + title_post_tags
for tag_count in total_pos_tag_count:
feature_row.append(tag_count)
data[i,:] = feature_row
end = time.time()
print "Time Taken to extract all features : ", end-start
train_data,test_data,train_label,test_label = cross_validation.train_test_split(data,class_labels,test_size=0.3)
# Initializing the classifiers
rf = RandomForestClassifier(n_estimators=101)
ada = AdaBoostClassifier(n_estimators=101)
gradboost = GradientBoostingClassifier(n_estimators=101)
svm = SVC()
gnb = GaussianNB()
classifiers = [rf,ada,gradboost,svm,gnb]
classifier_names = ["Random Forests","AdaBoost","Gradient Boost","SVM","Gaussian NB"]
print "Starting Classification Performance Cycle ..."
start = time.time()
for classifier,classifier_name in zip(classifiers,classifier_names):
classifier.fit(train_data,train_label)
predicted_label = classifier.predict(test_data)
print "--------------------------------------------------------\n"
print "Accuracy for ",classifier_name, " : ",metrics.accuracy_score(test_label,predicted_label)
print "Confusion Matrix for ",classifier_name, " :\n ",metrics.confusion_matrix(test_label,predicted_label)
print "Classification Report for ",classifier_name, " : \n",metrics.classification_report(test_label,predicted_label)
print "--------------------------------------------------------\n"
end = time.time()
print "Time Taken for classification and performance Metrics calculation : ",end-start | mit |
gagnonlg/explore-ml | rnn.py | 1 | 3291 | import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
np.random.seed(17430)
theano.config.floatX = 'float32'
#theano.config.optimizer='fast_compile'
#theano.config.exception_verbosity='high'
# generate data
def gen_sample():
mean = [
[0.0, 0.5],
[0.5, 0.0]
]
scale = [
[0.15, 0.15],
[0.25, 0.25]
]
n = np.random.randint(2,7)
i = np.random.randint(2)
return np.random.normal(mean[i], scale[i], (n,2)), i
def show_distribution():
for _ in range(500):
smp, cls = gen_sample()
plt.scatter(smp[:,0], smp[:,1], color='k' if cls == 0 else 'b')
plt.show()
def make_dataset(N):
dsetX = np.zeros((N, 6, 2))
dsetY = np.zeros((N, 1))
dsetM = np.zeros((N, 6))
for i in range(N):
smp, cls = gen_sample()
dsetX[i,:smp.shape[0]] = smp
dsetY[i,0] = cls
dsetM[i,:smp.shape[0]] = 1
return dsetX.astype('float32'), dsetY.astype('float32'), dsetM.astype('float32')
def rnn_step(x, h, U, b, W):
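    # One step of a vanilla (Elman) RNN: h_t = tanh(b + U x_t + W h_{t-1})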
return T.tanh(b + T.dot(U, x) + T.dot(W, h))
# x: n_features
n_features = 2
x = T.matrix('x')
# h: n_state
n_state = 5
#h = theano.shared(np.random.uniform(size=n_state).astype('float32'))
# U*x -> U: n_state, n_features
U = theano.shared(
np.random.uniform(
size=(n_state, n_features),
).astype('float32')
)
# W*h -> W: n_state, n_state
W = theano.shared(
np.random.uniform(
size=(n_state, n_state),
).astype('float32')
)
# b: n_state
b = theano.shared(
np.zeros(n_state).astype('float32')
)
initial_state = theano.shared(
np.zeros(n_state).astype('float32')
)
results, updates = theano.scan(
fn=rnn_step,
outputs_info=T.zeros_like(initial_state),
sequences=x,
non_sequences=[U,b,W]
)
def pred_step(h, V, c):
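    # Per-timestep readout: y_t = sigmoid(c + V h_t)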
return T.nnet.sigmoid(c + T.dot(V, h))
# Vh + c = y
# V: n_out x n_state
# c: n_out
n_out = 1
V = theano.shared(np.random.uniform(size=(n_out,n_state)).astype('float32'))
c = theano.shared(np.zeros((n_out,)).astype('float32'))
preds, pupds = theano.scan(
fn=pred_step,
outputs_info=None,
sequences=results,
non_sequences=[V, c]
)
y = T.vector('y')
mask = T.vector('mask')
loss = T.sum(mask.dimshuffle(0,'x') * T.nnet.binary_crossentropy(preds, y))
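# The mask zeroes out cross-entropy terms from padded timesteps (sequences shorter
# than 6 are zero-padded in make_dataset), so they do not contribute to the loss.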
params = [U,W,b,V,c]
gparams = [T.grad(loss, p) for p in params]
gupdates = [
(param, param - 0.001 * gparam)
for param, gparam in zip(params, gparams)
]
rnnfunc = theano.function(
inputs=[x, y, mask],
outputs=[results, preds, loss],
updates=gupdates
)
rnntest = theano.function(
inputs=[x],
outputs=preds[-1]
)
trainX, trainY, trainM = make_dataset(1000)
testX, testY, _ = make_dataset(1000)
if __name__ == '__main__':
for epoch in range(10):
losses = []
for i in range(1000):
states, preds, ls = rnnfunc(trainX[i], trainY[i], trainM[i])
losses.append(ls)
good = float(0.0)
for i in range(1000):
pred = rnntest(testX[i])
pcls = (pred > 0.5).astype('float32')
if pcls[0] == testY[i][0]:
good += 1.0
print 'epoch {}: loss: {} acc:{}'.format(
epoch,
np.mean(losses),
(good/1000)
)
| gpl-3.0 |
JosmanPS/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
        k(X, Y) = X * M * Y.T,  where M = [[2, 0], [0, 1]]
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
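# Quick sanity check (an addition, not in the original example): this kernel is a
# linear kernel that simply weights the first feature twice as strongly.
assert np.allclose(my_kernel(X[:3], X[:3]),
                   np.dot(X[:3] * np.array([2.0, 1.0]), X[:3].T))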
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
arielmakestuff/loadlimit | loadlimit/result.py | 1 | 14015 | # -*- coding: utf-8 -*-
# loadlimit/result.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Result classes used to create results from various statistics"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
from hashlib import sha1
from collections import namedtuple, OrderedDict
from pathlib import Path
from time import mktime
# Third-party imports
import pandas as pd
from pandas import (DataFrame, read_sql_table, Series)
from sqlalchemy import create_engine
# Local imports
from .stat import measure, Period
from .util import Namespace, now
# ============================================================================
# Result classes
# ============================================================================
class Result:
"""Calculate result DataFrame from a Period"""
def __init__(self, statsdict=None, countstore=None):
self._statsdict = Period() if statsdict is None else statsdict
self._countstore = measure if countstore is None else countstore
self._vals = Namespace()
def __iter__(self):
for name, datatype in self._statsdict.items():
yield (name, datatype['data'], datatype['error'],
datatype['failure'])
def __enter__(self):
"""Start calculating the result"""
countstore = self._countstore
vals = self.vals
vals.start = countstore.start_date
vals.end = countstore.end_date
return self
def __exit__(self, errtype, err, errtb):
"""Finish calculating the result"""
raise NotImplementedError
def __call__(self):
"""Calculate the result"""
calcfunc = self.calculate
vals = self.vals
with self:
for name, data, error, failure in self:
calcfunc(name, data, error, failure)
return vals.results
def calculate(self, name, data, error, failure):
"""Calculate results"""
raise NotImplementedError
def export(self, export_type, exportdir):
"""Export results"""
raise NotImplementedError
def exportdf(self, df, name, export_type, exportdir):
"""Export dataframe"""
timestamp = int(mktime(now().timetuple()))
filename = '{}_{}'.format(name, timestamp)
exportdir = Path(exportdir)
if export_type == 'csv':
path = exportdir / '{}.{}'.format(filename, 'csv')
df.to_csv(str(path), index_label=df.index.names)
else: # export_type == 'sqlite':
path = str(exportdir / '{}.{}'.format(filename, 'db'))
sqlengine = create_engine('sqlite:///{}'.format(path))
with sqlengine.begin() as conn:
df.to_sql('total', conn)
@property
def statsdict(self):
"""Return stored period statsdict"""
return self._statsdict
@property
def countstore(self):
"""Return stored countstore"""
return self._countstore
@property
def vals(self):
"""Return value namespace"""
return self._vals
class Total(Result):
"""Calculate totals"""
def __enter__(self):
ret = super().__enter__()
vals = self.vals
# Duration (in seconds)
vals.duration = (vals.end - vals.start).total_seconds()
vals.results = {}
vals.index = i = ['Total', 'Median', 'Average', 'Min', 'Max', 'Rate']
vals.resultcls = namedtuple('ResultType', [n.lower() for n in i])
vals.delta = None
return ret
def __exit__(self, errtype, err, errtb):
"""Finish calculations and save result"""
vals = self.vals
results = vals.results
dfindex = (k for k, v in results.items() if v is not None)
dfindex = list(sorted(dfindex, key=lambda k: k))
data = [results[v] for v in dfindex]
if not data:
vals.results = None
return
df = DataFrame(data, index=dfindex)
total = df['Total'].sum()
rate = 0 if vals.duration == 0 else total / vals.duration
result = [total, df['Median'].median(), df['Average'].mean(),
df['Min'].min(), df['Max'].max(), rate]
result = DataFrame([Series(result, index=vals.index)],
index=['Totals'])
vals.results = df = df.append(result)
df.index.names = ['Name']
def calculate(self, name, data, error, failure):
"""Calculate results"""
vals = self.vals
countstore = self.countstore
# Number of iterations
# This is a list of pandas.Series
total = countstore[name].success
numiter = len(data)
if numiter == 0:
vals.results[name] = None
return
# Create dataframe out of the timeseries and get only the response
# field
df = DataFrame(data, index=list(range(numiter)))
delta = df['response']
# Calculate stats
rate = 0 if vals.duration == 0 else total / vals.duration
r = [total, delta.median(), delta.mean(), delta.min(), delta.max(),
rate]
r = vals.resultcls(*r)
vals.results[name] = Series(r, index=vals.index)
def export(self, export_type, exportdir):
"""Export total values"""
results = self.vals.results
if results is not None:
self.exportdf(results, 'results', export_type, exportdir)
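# Usage sketch (illustrative only; it assumes a populated Period statsdict and the
# module-level `measure` countstore, so it is not runnable on its own):
#     total = Total(statsdict=stats, countstore=measure)
#     df = total()                 # per-name stats plus a 'Totals' row
#     total.export('csv', '/tmp')  # writes results_<timestamp>.csv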
class TimeSeries(Result):
"""Calculate time series results"""
def __enter__(self):
ret = super().__enter__()
vals = self.vals
vals.response_result = {}
vals.rate_result = {}
return ret
def __exit__(self, errtype, err, errtb):
"""Finish calculations and save result"""
vals = self.vals
response_result = vals.response_result
rate_result = vals.rate_result
df_response = None
df_rate = None
# Create response dataframe
dfindex = (k for k, v in response_result.items() if v is not None)
dfindex = list(sorted(dfindex, key=lambda k: k))
data = OrderedDict()
for name in dfindex:
data[name] = response_result[name]
if data:
df_response = DataFrame(data)
df_response.index.names = ['Timestamp']
# Create rate dataframe
data = OrderedDict()
for name in dfindex:
data[name] = rate_result[name]
df_rate = DataFrame(data)
df_rate.index.names = ['Timestamp']
# Return both dataframes
vals.results = (df_response, df_rate)
for n in ['response_result', 'rate_result']:
delattr(vals, n)
def calculate(self, name, data, error, failure):
"""Calculate results"""
vals = self.vals
response = []
rate = []
# Number of iterations
numiter = len(data)
if numiter == 0:
vals.response_result[name] = None
vals.rate_result[name] = None
return
# Create dataframe out of the timeseries and get average response time
# for each determined datetime period
df = DataFrame(data, index=list(range(numiter)))
daterange = df['end']
response = df['response']
response.index = daterange
rate = df['rate']
rate.index = daterange
vals.response_result[name] = response # Series
vals.rate_result[name] = rate
def export(self, export_type, exportdir):
"""Export total values"""
df_response, df_rate = self.vals.results
for name, df in zip(['response', 'rate'], [df_response, df_rate]):
if df is not None:
self.exportdf(df, name, export_type, exportdir)
class GeneralError(Result):
"""Calculate error results"""
@property
def errtype(self):
"""Return errtype string"""
raise NotImplementedError
def __enter__(self):
ret = super().__enter__()
vals = self.vals
vals.results = {}
return ret
def __exit__(self, errtype, err, errtb):
"""Finish calculations and save result"""
results = self.vals.results
data = [v for v in results.values() if v is not None]
self.vals.results = pd.concat(data) if data else None
def calculate(self, name, *datatype):
"""Calculate results"""
vals = self.vals
errtype, errind = self.errtype
data = datatype[errind]
# Number of iterations
numiter = len(data)
if numiter == 0:
vals.results[name] = None
return
# Create dataframe out of the timeseries and get only the error field
df = DataFrame(data, index=list(range(numiter)))
df.insert(0, 'name', [name] * len(df))
aggregate = {'count': 'sum'}
result = df.groupby(['name', errtype]).agg(aggregate)
result.columns = ['Total']
result.index.names = ['Name', errtype.capitalize()]
vals.results[name] = result
def export(self, export_type, exportdir):
"""Export total values"""
results = self.vals.results
if results is not None:
self.exportdf(results, self.errtype[0], export_type, exportdir)
class TotalError(GeneralError):
"""Calculate error results"""
@property
def errtype(self):
"""Return error errortype"""
return 'error', 1
class TotalFailure(GeneralError):
"""Calculate failure results"""
@property
def errtype(self):
"""Return error errortype"""
return 'failure', 2
# ============================================================================
# SQL versions of Results
# ============================================================================
class SQLResult:
"""Define iterating over values stored in an sql db"""
def __init__(self, statsdict=None, countstore=None, sqltbl='period',
sqlengine=None):
super().__init__(statsdict, countstore)
vals = self.vals
vals.sqltbl = sqltbl
vals.sqlengine = sqlengine
vals.datatype = ['timedata', 'error', 'failure']
def __iter__(self):
vals = self.vals
sqlengine = vals.sqlengine
with sqlengine.begin() as conn:
for name in self._statsdict:
# Generate table name
curkey = sha1(name.encode('utf-8')).hexdigest()
df = {}
for k, substr in zip(vals.datatype,
['_', '_error_', '_failure_']):
tblname = '{}{}{}'.format(vals.sqltbl, substr, curkey)
df[k] = self.getdata(k, vals, sqlengine, conn, tblname)
yield name, df['timedata'], df['error'], df['failure']
def getdata(self, key, vals, sqlengine, sqlconn, tblname):
"""Get time data from db"""
# Get number of rows in db
hastable = sqlengine.dialect.has_table(sqlconn, tblname)
if not hastable:
return None
df = read_sql_table(tblname, sqlconn, index_col='index',
parse_dates={'end': dict(utc=True)})
return df
class SQLTotal(SQLResult, Total):
"""Calculate totals from sql db"""
def calculate(self, name, dfdata, dferror, dffailure):
"""Calculate results"""
vals = self.vals
countstore = self.countstore
total = countstore[name].success
numiter = len(dfdata.index)
if numiter == 0:
vals.results[name] = None
return
delta = dfdata['response']
# Calculate stats
rate = 0 if vals.duration == 0 else total / vals.duration
r = [total, delta.median(), delta.mean(), delta.min(), delta.max(),
rate]
r = vals.resultcls(*r)
vals.results[name] = Series(r, index=vals.index)
class SQLTimeSeries(SQLResult, TimeSeries):
"""Calculate time series results from sql db"""
def calculate(self, name, dfdata, dferror, dffailure):
"""Calculate results"""
vals = self.vals
# Number of iterations
numiter = len(dfdata.index)
if numiter == 0:
vals.response_result[name] = None
vals.rate_result[name] = None
return
response = []
rate = []
# Create dataframe out of the timeseries and get average response time
# for each determined datetime period
df = dfdata
daterange = df['end']
response = df['response']
response.index = daterange
rate = df['rate']
rate.index = daterange
vals.response_result[name] = response # Series
vals.rate_result[name] = rate
class SQLGeneralError:
"""Calculate error results"""
def calculate(self, name, *datatype):
"""Calculate results"""
vals = self.vals
errtype, errind = self.errtype
df = datatype[errind]
if df is None:
vals.results[name] = None
return
# Create dataframe out of the timeseries and get only the error field
df.insert(0, 'name', [name] * len(df))
aggregate = {'count': 'sum'}
result = df.groupby(['name', errtype]).agg(aggregate)
result.columns = ['Total']
result.index.names = ['Name', errtype.capitalize()]
vals.results[name] = result
class SQLTotalError(SQLResult, SQLGeneralError, TotalError):
"""Calculate total errors from sql db"""
class SQLTotalFailure(SQLResult, SQLGeneralError, TotalFailure):
"""Calculate total errors from sql db"""
# ============================================================================
#
# ============================================================================
| mit |
yarikoptic/pystatsmodels | statsmodels/sandbox/examples/ex_mixed_lls_timecorr.py | 4 | 7778 | # -*- coding: utf-8 -*-
"""Example using OneWayMixed with within group intertemporal correlation
Created on Sat Dec 03 10:15:55 2011
Author: Josef Perktold
This example constructs a linear model with individual specific random
effects, and uses OneWayMixed to estimate it.
This is a variation on ex_mixed_lls_0.py.
Here we use time dummies as random effects (all except 1st time period).
I think, this should allow for (almost) arbitrary intertemporal correlation.
The assumption is that each unit can have different constants; however, the
intertemporal covariance matrix is the same for all units. One caveat: to
avoid singular matrices, we have to treat one time period differently.
Estimation requires that the number of units is larger than the number of
time periods. Also, it requires that we have the same number of periods for
each unit.
I needed to remove the first observation from the time dummies to avoid a
singular matrix. So, the interpretation of time effects should be relative to
the first observation. (I didn't check the math.)
TODO:
Note: I don't already have a constant in X. The constant for the first
time observation is missing.
Do I need all dummies in exog_fe, Z, but not in exog_re, Z? Tried this and
it works.
In the error decomposition we also have the noise variable, I guess this works
like constant, so we get full rank (square) with only T-1 time dummies.
But we don't get correlation with the noise, or do we? conditional?
-> sample correlation of estimated random effects looks a bit high,
upward bias? or still some problems with initial condition?
correlation from estimated cov_random looks good.
Since we include the time dummies also in the fixed effect, we can have
arbitrary trends, different constants in each period.
Intertemporal correlation is built into the data generating process, DGP, to see if
the results correctly estimate it.
AR(1) is used as an example, but only starting at the second period. (?)
Note: we don't impose AR structure in the estimation
"""
import numpy as np
from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit
examples = ['ex1']
if 'ex1' in examples:
#np.random.seed(54321)
#np.random.seed(978326)
nsubj = 200
units = []
nobs_i = 8 #number of observations per unit, changed below
nx = 1 #number fixed effects
nz = nobs_i - 1 ##number random effects
beta = np.ones(nx)
gamma = 0.5 * np.ones(nz) #mean of random effect
#gamma[0] = 0
gamma_re_true = []
for i in range(nsubj):
#create data for one unit
#random effect/coefficient
use_correlated = True
if not use_correlated:
gamma_re = gamma + 0.2 * np.random.standard_normal(nz)
else:
#coefficients are AR(1) for all but first time periods
from scipy import linalg as splinalg
rho = 0.6
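            # AR(1) correlation: Corr[s, t] = rho**|s - t| (Toeplitz structure)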
corr_re = splinalg.toeplitz(rho**np.arange(nz))
rvs = np.random.multivariate_normal(np.zeros(nz), corr_re)
gamma_re = gamma + 0.2 * rvs
#store true parameter for checking
gamma_re_true.append(gamma_re)
#generate exogenous variables
X = np.random.standard_normal((nobs_i, nx))
#try Z should be time dummies
time_dummies = (np.arange(nobs_i)[:, None] == np.arange(nobs_i)[None, :]).astype(float)
Z = time_dummies[:,1:]
# Z = np.random.standard_normal((nobs_i, nz-1))
# Z = np.column_stack((np.ones(nobs_i), Z))
noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1
#generate endogenous variable
Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise
#add random effect design matrix also to fixed effects to
#capture the mean
#this seems to be necessary to force mean of RE to zero !?
#(It's not required for estimation but interpretation of random
#effects covariance matrix changes - still need to check details.
#X = np.hstack((X,Z))
X = np.hstack((X, time_dummies))
#create units and append to list
unit = Unit(Y, X, Z)
units.append(unit)
m = OneWayMixed(units)
import time
t0 = time.time()
m.initialize()
res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6)
t1 = time.time()
print 'time for initialize and fit', t1-t0
print 'number of iterations', m.iterations
#print dir(m)
#print vars(m)
print '\nestimates for fixed effects'
print m.a
print m.params
bfixed_cov = m.cov_fixed()
print 'beta fixed standard errors'
print np.sqrt(np.diag(bfixed_cov))
print m.bse
b_re = m.params_random_units
print 'RE mean:', b_re.mean(0)
print 'RE columns std', b_re.std(0)
print 'np.cov(b_re, rowvar=0), sample statistic'
print np.cov(b_re, rowvar=0)
print 'sample correlation of estimated random effects'
print np.corrcoef(b_re, rowvar=0)
print 'std of above'
#need atleast_1d or diag raises exception
print np.sqrt(np.diag(np.atleast_1d(np.cov(b_re, rowvar=0))))
print 'm.cov_random()'
print m.cov_random()
print 'correlation from above'
print res.cov_random()/ res.std_random()[:,None] /res.std_random()
print 'std of above'
print res.std_random()
print np.sqrt(np.diag(m.cov_random()))
print '\n(non)convergence of llf'
print m.history['llf'][-4:]
print 'convergence of parameters'
#print np.diff(np.vstack(m.history[-4:])[:,1:],axis=0)
print np.diff(np.vstack(m.history['params'][-4:]),axis=0)
print 'convergence of D'
print np.diff(np.array(m.history['D'][-4:]), axis=0)
#zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units])
zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units])
'''if Z is not included in X:
>>> np.dot(b_re.T, b_re)/100
array([[ 0.03270611, -0.00916051],
[-0.00916051, 0.26432783]])
>>> m.cov_random()
array([[ 0.0348722 , -0.00909159],
[-0.00909159, 0.26846254]])
>>> #note cov_random doesn't subtract mean!
'''
print '\nchecking the random effects distribution and prediction'
gamma_re_true = np.array(gamma_re_true)
print 'mean of random effect true', gamma_re_true.mean(0)
print 'mean from fixed effects ', m.params[-2:]
print 'mean of estimated RE ', b_re.mean(0)
print
absmean_true = np.abs(gamma_re_true).mean(0)
mape = ((m.params[-nz:] + b_re) / gamma_re_true - 1).mean(0)*100
mean_abs_perc = np.abs((m.params[-nz:] + b_re) - gamma_re_true).mean(0) \
/ absmean_true*100
median_abs_perc = np.median(np.abs((m.params[-nz:] + b_re) - gamma_re_true), 0) \
/ absmean_true*100
rmse_perc = ((m.params[-nz:] + b_re) - gamma_re_true).std(0) \
/ absmean_true*100
print 'mape ', mape
print 'mean_abs_perc ', mean_abs_perc
print 'median_abs_perc', median_abs_perc
print 'rmse_perc (std)', rmse_perc
from numpy.testing import assert_almost_equal
#assert is for n_units=100 in original example
#I changed random number generation, so this won't work anymore
#assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8)
#now returns res
print 'llf', res.llf #based on MLE, does not include constant
print 'tvalues', res.tvalues
print 'pvalues', res.pvalues
rmat = np.zeros(len(res.params))
rmat[-nz:] = 1
print 't_test mean of random effects variables are zero'
print res.t_test(rmat)
print 'f_test mean of both random effects variables is zero (joint hypothesis)'
print res.f_test(rmat)
plots = res.plot_random_univariate() #(bins=50)
fig = res.plot_scatter_all_pairs()
import matplotlib.pyplot as plt
plt.show()
| bsd-3-clause |
gotomypc/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
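# Each per-class sigmoid calibrator is applied to its own coordinate of the
# simplex grid, and the calibrated outputs are renormalised to sum to one.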
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
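# For reference, "balanced" weights are expected to follow
# n_samples / (n_classes * np.bincount(y)): for y above that is
# 6 / (3 * [3, 2, 1]) = [0.67, 1.0, 2.0], consistent with both assertions.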
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" are invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
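# Worked example: the bootstrap indices [0, 1, 1, 2, 2, 3] select classes
# [1, 1, 1, 1, 1, 2], so the balanced weights are 6 / (2 * 5) = 0.6 for
# class 1 and 6 / (2 * 1) = 3.0 for class 2, matching expected_balanced.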
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test that compute_sample_weight raises the expected errors.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
zzcclp/spark | python/pyspark/pandas/tests/plot/test_series_plot_matplotlib.py | 14 | 13615 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from distutils.version import LooseVersion
from io import BytesIO
import unittest
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import (
have_matplotlib,
matplotlib_requirement_message,
PandasOnSparkTestCase,
TestUtils,
)
if have_matplotlib:
import matplotlib
from matplotlib import pyplot as plt
matplotlib.use("agg")
@unittest.skipIf(not have_matplotlib, matplotlib_requirement_message)
class SeriesPlotMatplotlibTest(PandasOnSparkTestCase, TestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pd.set_option("plotting.backend", "matplotlib")
set_option("plotting.backend", "matplotlib")
set_option("plotting.max_rows", 1000)
@classmethod
def tearDownClass(cls):
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pd.reset_option("plotting.backend")
reset_option("plotting.backend")
reset_option("plotting.max_rows")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
@property
def psdf2(self):
return ps.range(1002)
@property
def pdf2(self):
return self.psdf2.to_pandas()
@staticmethod
def plot_to_base64(ax):
bytes_data = BytesIO()
ax.figure.savefig(bytes_data, format="png")
bytes_data.seek(0)
b64_data = base64.b64encode(bytes_data.read())
plt.close(ax.figure)
return b64_data
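# Two plots are considered equal when their rendered PNGs are byte-identical,
# a strict but simple way to compare pandas and pandas-on-Spark output.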
def test_bar_plot(self):
pdf = self.pdf1
psdf = self.psdf1
ax1 = pdf["a"].plot(kind="bar", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="bar", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot(kind="bar", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="bar", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_bar_plot_limited(self):
pdf = self.pdf2
psdf = self.psdf2
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["id"][:1000].plot.bar(colormap="Paired")
ax1.text(
1,
1,
"showing top 1000 elements only",
size=6,
ha="right",
va="bottom",
transform=ax1.transAxes,
)
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["id"].plot.bar(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_pie_plot(self):
pdf = self.pdf1
psdf = self.psdf1
ax1 = pdf["a"].plot.pie(colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot.pie(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot(kind="pie", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="pie", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_pie_plot_limited(self):
pdf = self.pdf2
psdf = self.psdf2
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["id"][:1000].plot.pie(colormap="Paired")
ax1.text(
1,
1,
"showing top 1000 elements only",
size=6,
ha="right",
va="bottom",
transform=ax1.transAxes,
)
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["id"].plot.pie(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_line_plot(self):
pdf = self.pdf1
psdf = self.psdf1
ax1 = pdf["a"].plot(kind="line", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="line", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot.line(colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot.line(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_barh_plot(self):
pdf = self.pdf1
psdf = self.psdf1
ax1 = pdf["a"].plot(kind="barh", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="barh", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_barh_plot_limited(self):
pdf = self.pdf2
psdf = self.psdf2
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["id"][:1000].plot.barh(colormap="Paired")
ax1.text(
1,
1,
"showing top 1000 elements only",
size=6,
ha="right",
va="bottom",
transform=ax1.transAxes,
)
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["id"].plot.barh(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_hist(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
psdf = ps.from_pandas(pdf)
def plot_to_base64(ax):
bytes_data = BytesIO()
ax.figure.savefig(bytes_data, format="png")
bytes_data.seek(0)
b64_data = base64.b64encode(bytes_data.read())
plt.close(ax.figure)
return b64_data
_, ax1 = plt.subplots(1, 1)
# Using plot.hist() because pandas changes tick properties when hist() is called
ax1 = pdf["a"].plot.hist()
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["a"].hist()
self.assert_eq(plot_to_base64(ax1), plot_to_base64(ax2))
def test_hist_plot(self):
pdf = self.pdf1
psdf = self.psdf1
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["a"].plot.hist()
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["a"].plot.hist()
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot.hist(bins=15)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot.hist(bins=15)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot(kind="hist", bins=15)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="hist", bins=15)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot.hist(bins=3, bottom=[2, 1, 3])
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot.hist(bins=3, bottom=[2, 1, 3])
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_area_plot(self):
pdf = pd.DataFrame(
{
"sales": [3, 2, 3, 9, 10, 6],
"signups": [5, 5, 6, 12, 14, 13],
"visits": [20, 42, 28, 62, 81, 50],
},
index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
)
psdf = ps.from_pandas(pdf)
ax1 = pdf["sales"].plot(kind="area", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["sales"].plot(kind="area", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["sales"].plot.area(colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["sales"].plot.area(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# just a sanity check for df.col type
ax1 = pdf.sales.plot(kind="area", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.sales.plot(kind="area", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_box_plot(self):
def check_box_plot(pser, psser, *args, **kwargs):
_, ax1 = plt.subplots(1, 1)
ax1 = pser.plot.box(*args, **kwargs)
_, ax2 = plt.subplots(1, 1)
ax2 = psser.plot.box(*args, **kwargs)
diffs = [
np.array([0, 0.5, 0, 0.5, 0, -0.5, 0, -0.5, 0, 0.5]),
np.array([0, 0.5, 0, 0]),
np.array([0, -0.5, 0, 0]),
]
try:
for i, (line1, line2) in enumerate(zip(ax1.get_lines(), ax2.get_lines())):
expected = line1.get_xydata().ravel()
actual = line2.get_xydata().ravel()
if i < 3:
actual += diffs[i]
self.assert_eq(pd.Series(expected), pd.Series(actual))
finally:
ax1.cla()
ax2.cla()
# Non-named Series
pser = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50], [0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10])
psser = ps.from_pandas(pser)
spec = [(self.pdf1.a, self.psdf1.a), (pser, psser)]
for p, k in spec:
check_box_plot(p, k)
check_box_plot(p, k, showfliers=True)
check_box_plot(p, k, sym="")
check_box_plot(p, k, sym=".", color="r")
check_box_plot(p, k, use_index=False, labels=["Test"])
check_box_plot(p, k, usermedians=[2.0])
check_box_plot(p, k, conf_intervals=[(1.0, 3.0)])
val = (1, 3)
self.assertRaises(
ValueError, lambda: check_box_plot(self.pdf1, self.psdf1, usermedians=[2.0, 3.0])
)
self.assertRaises(
ValueError, lambda: check_box_plot(self.pdf1, self.psdf1, conf_intervals=[val, val])
)
self.assertRaises(
ValueError, lambda: check_box_plot(self.pdf1, self.psdf1, conf_intervals=[(1,)])
)
def test_kde_plot(self):
def moving_average(a, n=10):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
def check_kde_plot(pdf, psdf, *args, **kwargs):
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["a"].plot.kde(*args, **kwargs)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["a"].plot.kde(*args, **kwargs)
try:
for i, (line1, line2) in enumerate(zip(ax1.get_lines(), ax2.get_lines())):
expected = line1.get_xydata().ravel()
actual = line2.get_xydata().ravel()
# TODO: Due to implementation difference, the output is different comparing
# to pandas'. We should identify the root cause of difference, and reduce
# the diff.
# Note: Data is from 1 to 50. So, it smooths them by moving average and compares
# both.
self.assertTrue(
np.allclose(moving_average(actual), moving_average(expected), rtol=3)
)
finally:
ax1.cla()
ax2.cla()
check_kde_plot(self.pdf1, self.psdf1, bw_method=0.3)
check_kde_plot(self.pdf1, self.psdf1, ind=[1, 2, 3, 4, 5], bw_method=3.0)
def test_empty_hist(self):
pdf = self.pdf1.assign(categorical="A")
psdf = ps.from_pandas(pdf)
psser = psdf["categorical"]
with self.assertRaisesRegex(TypeError, "Empty 'DataFrame': no numeric data to plot"):
psser.plot.hist()
def test_single_value_hist(self):
pdf = self.pdf1.assign(single=2)
psdf = ps.from_pandas(pdf)
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["single"].plot.hist()
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["single"].plot.hist()
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
if __name__ == "__main__":
from pyspark.pandas.tests.plot.test_series_plot_matplotlib import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
nuowang/DoctorSnapshot | src/step05_train_lda_model.py | 1 | 17392 | #!/usr/bin/env python3
# Created by Nuo Wang.
# Last modified on 8/17/2017.
# Required libraries.
import pandas as pd
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import gensim
from gensim import corpora, models, similarities
import logging
import pickle
### Step 1: Setting up.
# Load my dataset.
reviews = pd.read_csv("PATH/data/yelp_reviews.csv")
doctors = pd.read_csv("PATH/data/yelp_doctors.csv")
# The list to save the text bodies of all reviews.
review_main_text_list = []
# Add all review texts to the above list.
for i in range(0, len(reviews)):
paragraphs = reviews.loc[i]["review main text"]
review_main_text_list.append(paragraphs)
# Set up tokenizer.
tokenizer = RegexpTokenizer(r'\w+')
# Set up stop words.
stop = set(stopwords.words('english'))
# Set up stemmer.
p_stemmer = PorterStemmer()
# Set up logging for LDA in gensim.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
### Step 2: Clean up texts.
# List for the review texts that are tokenized, stop word deleted and stemmed.
cleaned_up_review_list = []
# For every review.
for document in review_main_text_list:
# Use the lowercase of all letters.
raw = document.lower()
# Tokenization
tokens = tokenizer.tokenize(raw)
# Delete stop words.
j = 0
while j < len(tokens):
if tokens[j] in stop:
del tokens[j]
else:
j += 1
# Stem each word.
cleaned_text = [p_stemmer.stem(i) for i in tokens]
# Add cleaned review text to list.
cleaned_up_review_list.append(cleaned_text)
# Find words frequency.
all_words = []
for i in cleaned_up_review_list:
for j in i:
all_words.append(j)
fdist = nltk.FreqDist(all_words)
# Print the most common words.
print(fdist.most_common())
### Step 3: Delete more unwanted words and generate corpus.
# The words to ignore.
words_to_ignore = ["dr", "doctor"]
# Alternative versions of popular words to ignore.
# It turns out that if one ignores too many low frequency words, the LDA results are worse.
# words_to_ignore = ["dr", "doctor", "yelp", "dc", "dos", "dmd", "do", "dpm", "mbbch", "md", "od", "rpt", "california", "stanford", "ucla", "ucsf", "usc", "ucsd", "san", "francisco", "diego", "los", "angeles", "oakland", "beverly", "hills", "daly", "santa", "monica", "alamo", "solana", "beach", "poway", "del", "mar", "la", "jolla", "santee", "northridge", "rafael", "panorama", "canada", "flintridge", "glendale", "canyon", "westlake", "village", "riverside", "van", "nuys", "burbank", "tarzana", "encino", "oaks", "fernando", "pasadena", "rancho", "cucamonga", "hollywood", "institute", "york", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "korean", "filipino", "chinese", "embarcadero", "sacramento", "deborah", "irish", "http", "bob", "sculli", "claud", "washington", "meister", "eydelman"]
# words_to_ignore = ["acn","agoura","botox","canal","cardiologist","cedar","colonoscopi","dental","dentist","dermatologist","ear","eczema","eye","face","foot","fungal","hair","haircut","hip","http","jewish","knee","laser","lasik","mr","optometrist","orthoped","pediatrician","perm","physic","physician","salon","straight","surgeon","surgeri","www"]
# Also ignore all doctor names. This list is semi-automatically generated (code not shown here).
doctor_names = ['Ganata', 'Brian', 'Stein', 'Jack', 'Mitchell', 'Masami', 'Belcher', 'Rosanelli', 'Weissman', 'Nobel', 'Johnson', 'Michele', 'Shirazi', 'Francis', 'John', 'Vidush', 'Scott', 'Grogan', 'Sands', 'Navneet', 'Abrishami', 'Yun', 'Trott', 'Line', 'Sumati', 'Binstock', 'Ulrika', 'Yousefi', 'Ma', 'Guy', 'Mullens', 'Green', 'Kelly', 'Schiller', 'Ullman', 'Eric', 'Podolin', 'Vartany', 'Sanaz', 'Kashani', 'Collins', 'Jose', 'Mehdizadeh', 'Tiller', 'Baron', 'Maryam', 'Hill', 'Flynn', 'Cecilio', 'Kishibay', 'Stephane', 'Felicia', 'Enayati', 'M', 'Caroline', 'Fahimian', 'Glenn', 'Richard', 'Lam', 'Bakshian', 'Besser', 'Meth', 'Nassos', 'Walker', 'Pena-Robles', 'Forouzesh', 'Rochester', 'Friedman', 'Tri', 'Robert', 'Parnaz', 'Seibert', 'Sterman', 'Hakimian', 'Anupam', 'Rivera', 'Ardizzone', 'Chun', 'Vu', 'Eduardo', 'Lindsay', 'Dave', 'Mirabadi', 'Benbow', 'Fidel', 'Akizuki', 'Carlos C.', 'Wong', 'Rudy', 'Agnes', 'Venuturupalli', 'Fox', 'Urusova', 'Kourosh', 'Potruch', 'Jane', 'Jerry', 'Veronique', 'Hariri', 'Griffiths', 'Wadhwa', 'Maria', 'Stone', 'Lofthus', 'Metaxas', 'Cho', 'Fitzgerald', 'Larry', 'Ellie', 'Armine', 'Virginia', 'Kapla', 'Cohen', 'Yussef', 'Buoncristiani', 'Miao', 'Neidlinger', 'Sajben', 'Gladstein', 'Hillary', 'Samuel', 'Loraine', 'Hakop', 'Benito', 'Mikus', 'Mark', 'Inna', 'Pham', 'Luftman', 'Jacobitz', 'Fine', 'Yuo', 'Annette', 'Miller', 'Shadan', 'Bokosky', 'Sarosy', 'Kathleen', 'Lawrence', 'Boodaghians', 'Tessler', 'Jiu', 'Sandy', 'Babak', 'Michel', 'Yampolsky', 'Roykh', 'Carl', 'Berty', 'Liu', 'Tao', 'Stacy', 'Shahab', 'Catherine', 'Samimi', 'Crane', 'Violet', 'Cynthia', 'Smith', 'Schumacher', 'Kaufman', 'Carlton', 'Friedland', 'Belaga', 'Pourrahimi', 'Montgomery', 'Oo', 'Etie', 'Lakshmi', 'Henderson', 'Shaun', 'Chou', 'Antigone', 'Woods', 'Redlin', 'Yang', 'Carey', 'Pack', 'Sirius', 'Huh', 'Tamara', 'Leon', 'Deleaver-Russell', 'Neifeld', 'Katz', 'Heidenfelder', 'Melanie', 'Mansour', 'Stephen', 'Stern', 'Buchi', 'Yvonne', 'Henry', 'Aurasteh', 'Cafaro', 'Dillingham', 'Obrien', 'Putnam', 'Hyver', 'Ghada', 'Burstein', 'Winchell', 'Peggy', 'Kyoko', 'Garcia', 'Morganroth', 'Shankar', 'Jonah', 'Yorobe', 'Donald', 'Chow', 'Cepkinian', 'Hamidi', 'Chittaphong', 'Snibbe', 'Gennady', 'Tim', 'Joie', 'Rosenbaum', 'Lamont', 'Margossian', 'Silverman', 'Hoosik', 'Lewis', 'Le', 'Justine', 'Linda', 'Maureen', 'Dadvand', 'Mirkarimi', 'Quintero', 'Madany', 'Apelian', 'Ken', 'Berman', 'Corey Bloom', 'Kutzscher', 'Gafori', 'Gevorkyan', 'Akagi', 'Markison', 'Yabumoto', 'Valenton', 'Amir', 'Degolia', 'Kruse', 'Davidson', 'Ahluwalia', 'George', 'Elisa', 'Stephanie', 'Babajanian', 'Schultz', 'Sonal', 'Mohammad', 'Thoene', 'Wan', 'Babapour', 'Richards', 'Minkowsky', 'Galitzer', 'Myunghae', 'Vadim', 'Hess', 'Pegah', 'Kawashiri', 'Jay', 'Kram', 'Aguilar', 'Paiement', 'Theodore', 'Oleary', 'Izumi', 'Kathryn', 'Robin', 'Edgardo', 'Yoon', 'Ashish', 'Mike', 'Farinoush', 'Kimberly', 'Hornbrook', 'Betsy', 'Isaac', 'Massoudi', 'Gottlieb', 'Falcon', 'Athyal', 'Chu', 'Mynsberge', 'Helen', 'Collin', 'Ambartsumyan', 'Stacey', 'Romeo', 'Goei', 'Colbert', 'Hong', 'Brar', 'Mario', 'Levin', 'Charles', 'Fischbein', 'Meghan', 'Navab', 'Luis', 'Xiang', 'Orecklin', 'Banooni', 'Girard', 'Abe', 'Bhoot', 'Ebrahimi', 'Saliman', 'Joseph', 'Koltzova-Rang', 'Fakhouri', 'Matt', 'Dhir', 'Ghosh', 'Walter', 'Mehras', 'Pudberry', 'Satinder', 'Mehranpour', 'Rajan', 'Mann', 'Chang', 'Juliana', 'Sid', 'Hernandez', 'Arretz', 'Gregory', 'Marion', 'Howell', 'Greg', 'Van', 'Akhavan', 'Kerry', 'Blechman', 'Cha', 'Leopoldo', 
'Jeannie', 'Cook', 'Giuliano', 'Scheinhorn', 'Weber', 'Raymond', 'Kupfer', 'Leung', 'Sarvenaz', 'Tuft', 'Amanda', 'Chobanian', 'Sima', 'Marc', 'Susan', 'Minoo', 'Ali', 'Eubanks', 'Soliman', 'Diego', 'Galpin', 'Sandler', 'Cinque', 'Eastman', 'Low', 'Marino', 'Lily', 'Barzman', 'Anita', 'Melissa', 'Maloney', 'Davies', 'Khoubnazar', 'Heather', 'Rachel', 'Molayem', 'Nita', 'Dan', 'Alison', 'Anderson', 'Alexander', 'Juma', 'Pool', 'Gin', 'Ganjianpour', 'Foster', 'Marriott', 'Bakst', 'Colette', 'Rust', 'Rahimian', 'Malamed', 'Jones', 'Nancy', 'Perl', 'Chester', 'Jasmine', 'Jared', 'Ehmer', 'Kalpari', 'Nazareth', 'Cortland', 'Chase', 'Arash', 'Birns', 'Bill', 'Eugene', 'Everson', 'David', 'Berkowitz', 'Nguyentu', 'Morteza', 'Zareh', 'May', 'Christine', 'Abuav', 'Amanuel', 'Mahnaz', 'Gingold', 'Desiree', 'Bruce', 'Parvin', 'McLucas', 'Azer', 'Kassab', 'Mandel', 'Shamie', 'Panosian', 'Andrea', 'Jerome', 'Sanjay', 'Bhuva', 'Gayane', 'Baker', 'Asaf', 'Gaminchi', 'Tran', 'Mamta', 'Philippe', 'Tang', 'Peiman', 'Rosen', 'Shiell', 'Iskander', 'Stefan', 'Levy', 'Jessica', 'Fung', 'Bailey', 'Edmund', 'Borah', 'Hulley', 'Armando', 'Kambiz', 'Stuart', 'Youssef', 'Blumstein', 'Groth', 'Frederica', 'Karpman', 'Palm', 'Nguyen', 'Davis', 'Zapata', 'Aizuss', 'William', 'Amirtharajah', 'Darush', 'Judy', 'Dina', 'Hans', 'Andre', 'Gary', 'Regan', 'Christman', 'Riedler', 'Justus', 'Yulia', 'Allan', 'Miclau', 'Edwin', 'Scalise', 'Bardowell', 'Jeanne', 'Reid', 'Agata', 'Reiche', 'Laurie', 'Castillo', 'Abraham', 'Arnold', 'Levi', 'Pedro', 'Thomas', 'Paul', 'Boska', 'Doan', 'Ng', 'Moradzadeh', 'Wolfe', 'Khalil', 'Park', 'Liao', 'Timothy', 'Nikolaj', 'Sinclair', 'Karlsberg', 'Yelding-Sloan', 'Marcelo', 'Graham', 'Shih', 'Bhatia', 'Waleed', 'Foxman', 'Khosravi', 'Yee', 'Inouye', 'Joshua', 'Edelson', 'Najarian', 'Klasky', 'Knox', 'Vaughn', 'Boykoff', 'Teguh', 'Kwang', 'Oliver', 'Alex', 'Debra', 'Snunit', 'Irene', 'Auerbach', 'Kearney', 'Gupta', 'Royeen', 'Tristan', 'Binder', 'Min', 'Kevin', 'Monali', 'Golshani', 'Emrani', 'Rostker', 'Lieu', 'Lepor', 'Massry', 'Claire', 'Rita', 'Cotter', 'Drell', 'Zdzislaus', 'Schechter', 'Vail', 'Kind', 'Marilyn', 'Jockin', 'Mantell', 'Kramer', 'Shaden', 'Sherman', 'Rafael', 'Holly', 'Jick', 'Uyeki', 'Ginsberg', 'Gold', 'Afshine', 'Casper', 'Khin', 'Hofstadter', 'Safvati', 'Suhail', 'Nick', 'Krames', 'Afshin', 'Myers', 'Sawsan', 'Meng', 'Mayer', 'Silberstein', 'Barkley', 'Kamanine', 'Warren', 'Shindy', 'Aslanian', 'Dembo-Smeaton', 'Prathipati', 'Rosenberg', 'Jeremy', 'Cepeda', 'Jade', 'Barnhard', 'Duong', 'Habib', 'Eisenhart', 'Tamayo', 'Goodman', 'Danelle', 'Christopher', 'Darakjian', 'Hal', 'Marquez', 'Moshe', 'Harry', 'Philip', 'Shirley', 'Alen', 'Hsu', 'Tu', 'Renee', 'Kiriakos', 'Estes', 'Kayekjian', 'Lynn', 'Kim', 'Perry', 'Tachdjian', 'Marnell', 'Karen', 'Sosa', 'Vahan', 'Talreja', 'Estwick', 'Floyd', 'Nilesh', 'Adrien', 'Yamaguchi', 'Tamer', 'Oechsel', 'Sandeep', 'Tabsh', 'Bowden', 'Bohn', 'Day', 'Edward', 'James', 'Zonia', 'Mani', 'Young', 'Stamper', 'Simon', 'Dougherty', 'Touradge', 'Otoole', 'Bresnick', 'Quan', 'Leslie', 'Sellman', 'Matthew', 'Donna', 'Swerdlow', 'Daphne', 'Mohamed', 'Pigeon', 'Marshall', 'Sawusch', 'Newcomer', 'Ganelis', 'Dysart', 'Hamilton', 'Russo', 'Lavi', 'Hayward', 'Cheryl', 'Jessely', 'Rabinovich', 'Lee', 'Mohammed', 'Rashti', 'Sarkisian', 'Factor', 'Jeannine', 'Laurence', 'Skoulas', 'Amarpreet', 'Bajaj', 'Beller', 'Villanueva', 'Lenzkes', 'Hu', 'Victor', 'Blair', 'Tarick', 'Alla', 'Dardashti', 'Tsoi', 'Sumeer', 'Mancherian', 'Levinson', 'Borookhim', 
'Shervin', 'Flores', 'Bessie', 'Irina', 'Levine', 'Schofferman', 'Draupadi', 'Lipton', 'Epstein', 'Yen', 'Agbuya', 'Ruder', 'Mansfield', 'Rawat', 'Lin', 'Alessi', 'Eshaghian', 'Takahashi', 'Keyvan', 'Rashtian', 'Derrick', 'Arya Nick', 'Bert', 'Quock', 'Shukri', 'Flaherty', 'Kvitash', 'Man', 'Diamond', 'Choi', 'Lisa', 'Miremadi', 'Wendy', 'Liau', 'Lofquist', 'Shunpei', 'Douglas', 'Saito', 'Roya', 'Jenkin', 'Ralph', 'Dicks', 'Moghissi', 'De Luna', 'Michael', 'Nadiv', 'Schwanke', 'Kapoor', 'Starrett', 'Yip', 'Akash', 'Reese', 'Vera', 'Bridge', 'Feghali', 'Rosenbach', 'Jennifer', 'Brenda', 'Garg', 'Haleh', 'Chin', 'Yoo', 'Harold', 'Goodwin', 'Feltman', 'Shehnaz', 'Wu', 'Hendry', 'Emmanuel', 'Elena', 'Lakshman', 'Danzer', 'Maurice', 'Farnaz', 'Rose', 'Leitner', 'Khodabakhsh', 'Nam', 'Simoni', 'Parviz', 'Biderman', 'Snyder', 'Jacob', 'Anmar', 'Justin', 'Soleimani', 'Waring', 'Mueller', 'Fishman', 'Custis', 'Ann', 'Gilman', 'Nunes', 'Flach', 'Gores', 'Larian', 'Dana', 'Yokoyama', 'Dalwani', 'Chunbong', 'Grady', 'Carlos', 'Diana', 'Nora', 'Roberts', 'Elayne', 'Yvette', 'Weiss', 'Larisse', 'McMillan', 'Cesar', 'Kang', 'Chan', 'Gabriel', 'Rabin', 'Milder', 'Chenette', 'Lawton', 'Garabed', 'Malhotra', 'Char', 'Makassebi', 'Patel', 'Mesler', 'Eisele', 'Kenneth', 'Tamkin', 'Salit', 'Abhay', 'Keith', 'Sternberg', 'Wolff', 'Cortez', 'Rhee', 'Plance', 'Vincent', 'Pivo', 'Boone', 'Jonathan', 'Rosanna', 'Sangdo', 'Suzanne', 'Yu', 'Vega', 'Strom', 'Lau', 'Ben-Ozer', 'Hoyman', 'Bryan', 'Garbis', 'Hattori', 'Kahn-Rose', 'Macy', 'Woolf', 'Tamarin', 'Genen', 'Gaytan', 'Ramtin', 'Valerie', 'Atkin', 'Solomon', 'Fossett', 'Mahshid', 'Alikpala', 'Neustein', 'Tasto', 'Arjang', 'Ebrahim', 'Lief', 'Lara', 'Raul', 'Leah', 'Raffi', 'Darragh', 'Howard', 'Pedrotty', 'Serena', 'Chandrasekhar', 'Irving', 'Berdjis', 'Brown', 'Diggs', 'Sverdlov', 'Reza', 'Alza', 'Felipe', 'Yamada', 'Frederick', 'Jerrold', 'Orpilla', 'Peter', 'Barry', 'Sakhai', 'Alan', 'Kadner', 'Patrick', 'Jeffrey', 'Armstrong', 'Kleid', 'Paula', 'Tahani', 'Garber', 'Watson', 'Melvin', 'Gustavo', 'Roth', 'Smaili', 'Hoang', 'Devron', 'Daws', 'Tuan', 'Trojnar', 'Bong', 'Katherine', 'Nesari', 'Kawilarang', 'Ronald', 'Kamran', 'Gordon', 'Menendez', 'Bortz', 'Massey', 'Rubenzik', 'Alfred', 'Marina', 'Fawaz', 'Shafipour', 'Bloomfield', 'Feldman', 'Chua', 'Pouya', 'Peyman', 'Norman', 'Stefani', 'Yazdani', 'Sameer', 'Dohad', 'Kurtz', 'Molato', 'Refoa', 'Marie', 'Engel', 'Pamela', 'Caroll', 'Daneshgar', 'Sun', 'Haley', 'Valentina', 'Leonard', 'Maeck', 'Michelle', 'Roohipour', 'Faisal', 'Payam', 'Kramar', 'Kerman', 'Sherwin', 'Khoury', 'Garrick', 'Leyli', 'Wanski', 'Cardon', 'Pean', 'Assil', 'Bahadori', 'Andrew', 'Rodney', 'Chiu', 'Taaly', 'Remy', 'Fisher', 'Sharon', 'Melody', 'Bala', 'Armen', 'Mobasser', 'Joy', 'Nader', 'Beeve', 'Hammond', 'Vanhale', 'Cheung', 'Cheng', 'Kranson', 'Sloan', 'Delois', 'Silani', 'Wieder', 'Vafaie', 'Chien', 'Brandeis', 'Su', 'Gamache', 'Ray', 'Bean', 'Mohana', 'Guido', 'Starnes', 'Chong', 'Martin', 'Daniel', 'Biana', 'Schulman', 'Marianne', 'Randolph', 'Morris', 'Xilin', 'Bailony', 'Reynaldo', 'Caldwell', 'Song', 'Herbert', 'Saad', 'Elgan', 'Esther', 'Nasimeh', 'Paz', 'Greenberg', 'Hopper', 'Derek', 'Grant', 'Vlad', 'Kaplan', 'Amini', 'Albert', 'Kling', 'Benjamin', 'Sam', 'Cabrera', 'Chiu-Collins', 'Wolfson', 'Margo', 'Cowan', 'Chen', 'Payman', 'Rish', 'Sanders', 'Cameron', 'Owens', 'Phillips', 'Dao', 'Allison', 'Maywood', 'Elliot', 'Jody', 'Thaik', 'Korchek', 'Eng', 'Ton', 'Thuc', 'Nathan', 'Glen', 'Bickman', 'Reyes', 'Sarafzadeh', 
'Hansen', 'Yuan', 'Nikole', 'Mervin', 'Aiache', 'Iwata', 'Considine', 'Tyler', 'Rodriguez', 'Dawn', 'Steven', 'Carol', 'E.', 'Kamyar', 'Tin', 'Jason', 'Moy', 'Duncan', 'Merilynn', 'Dye', 'Chaves', 'Sajedi', 'Strelkoff', 'Lattanza', 'Janet', 'Joan', 'Elizabeth', 'Weller', 'Swamy', 'Rupsa', 'Laura', 'Shu', 'Joana', 'Jan', 'Joel', 'Rubinstein', 'Co']
# List to store the cleaned up doctor names.
doctor_names_cleaned_up = []
for name in doctor_names:
doctor_names_cleaned_up.append(p_stemmer.stem(name.lower()))
# Append cleaned up doctor names to the words to ignore.
words_to_ignore += doctor_names_cleaned_up
# Filter out the words that we want to ignore.
cleaned_up_review_list2 = list(cleaned_up_review_list)
for tokens in cleaned_up_review_list2:
# Delete specificed words.
j = 0
while j < len(tokens):
if tokens[j] in words_to_ignore:
del tokens[j]
else:
j += 1
# Generate dictionary and corpus from the remaining words.
dictionary = gensim.corpora.Dictionary(cleaned_up_review_list2)
corpus = [dictionary.doc2bow(word) for word in cleaned_up_review_list2]
# TF-IDF.
# TF-IDF does not work well for this dataset; it leads to worse results.
# See the toy-corpus demonstration at the bottom of this script for why.
# tfidf = models.TfidfModel(corpus)
# corpus = tfidf[corpus]
### Step 4: Train LDA model.
# Set up LDA model parameters.
no_of_topics = 15
passes_in = 100
# Train LDA model and save the model results.
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=no_of_topics, id2word = dictionary, passes=passes_in, alpha='asymmetric')
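# alpha='asymmetric' uses gensim's fixed, non-uniform Dirichlet prior over topics
# (roughly 1 / (topic_index + sqrt(num_topics))), which lets a few topics carry
# more probability mass than a flat prior would.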
pickle.dump(ldamodel, open("PATH/model/lda.pickle", "wb"))
pickle.dump(dictionary, open("PATH/model/dictionary.pickle", "wb"))
pickle.dump(corpus, open("PATH/model/corpus.pickle", "wb"))
# Check resulting topics.
topic_list = ldamodel.print_topics(num_topics=no_of_topics, num_words=15)
for index, i in enumerate(topic_list):
str1 = str(i[1])
for c in "0123456789+*\".":
str1 = str1.replace(c, "")
str1 = str1.replace(" ", " ")
print(str1)
########################################################
#### Using a toy corpus to show why TF-IDF doesn't work.
texts = [["dentist", "teeth", "insurance", "bill", "pizza"],
["dentist", "teeth", "appointment", "calll", "magician"],
["insurance", "bill", "appointment", "call", "fifteen"]]
dictionary_text = gensim.corpora.Dictionary(texts)
corpus_text = [dictionary_text.doc2bow(word) for word in texts]
tfidf_text = models.TfidfModel(corpus_text)
corpus_tfidf = tfidf_text[corpus_text]
ldamodel_text_1 = gensim.models.ldamodel.LdaModel(corpus_text, num_topics=4, id2word = dictionary_text, passes=200)
ldamodel_text_2 = gensim.models.ldamodel.LdaModel(corpus_tfidf, num_topics=4, id2word = dictionary_text, passes=200)
topic_list_1 = ldamodel_text_1.print_topics()
topic_list_2 = ldamodel_text_2.print_topics()
for index, i in enumerate(topic_list_1):
str1 = str(i[1])
for c in "0123456789+*\".":
str1 = str1.replace(c, "")
str1 = str1.replace(" ", " ")
print(str1)
print()
for index, i in enumerate(topic_list_2):
str1 = str(i[1])
for c in "0123456789+*\".":
str1 = str1.replace(c, "")
str1 = str1.replace(" ", " ")
print(str1)
| mit |
netceteragroup/esa-beam | beam-3dveglab-vlab/src/main/scenes/librat_scenes/rpv_invert.py | 1 | 20861 | #!/usr/bin/env python
import sys, os, argparse, glob
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
from math import sqrt
debug = True
# Nelder-Mead simplex minimization of a nonlinear (multivariate) function.
#
# The programming interface is via the VLAB.Minimize_minimize() function; see below.
#
# This code has been adapted from the C-coded nelmin.c which was
# adapted from the Fortran-coded nelmin.f which was, in turn, adapted
# from the papers
#
# J.A. Nelder and R. Mead (1965)
# A simplex method for function minimization.
# Computer Journal, Volume 7, pp 308-313.
#
# R. O'Neill (1971)
# Algorithm AS47. Function minimization using a simplex algorithm.
# Applied Statistics, Volume 20, pp 338-345.
#
# and some examples are in
#
# D.M. Olsson and L.S. Nelson (1975)
# The Nelder-Mead Simplex procedure for function minimization.
# Technometrics, Volume 17 No. 1, pp 45-51.
#
# For a fairly recent and popular incarnation of this minimizer,
# see the amoeba function in the famous "Numerical Recipes" text.
#
# P. Jacobs
# School of Engineering, The University of Queensland
# 07-Jan-04
#
# Modifications by C. Schenkel
# Netcetera
# 31-Oct-13
#-----------------------------------------------------------------------
class VLAB:
def Minimize_create_new_point(c1, p1, c2, p2):
"""
Create a new N-dimensional point as a weighting of points p1 and p2.
"""
p_new = []
for j in range(len(p1)):
p_new.append(c1 * p1[j] + c2 * p2[j])
return p_new
Minimize_create_new_point = staticmethod(Minimize_create_new_point)
def Minimize_take_a_step(smplx, Kreflect, Kextend, Kcontract):
"""
Try to move away from the worst point in the simplex.
The new point will be inserted into the simplex (in place).
"""
i_low = smplx.lowest()
i_high = smplx.highest()
x_high = smplx.vertex_list[i_high]
f_high = smplx.f_list[i_high]
# Centroid of simplex excluding worst point.
x_mid = smplx.centroid(i_high)
f_mid = smplx.f(x_mid)
smplx.nfe += 1
# First, try moving away from worst point by
# reflection through centroid
x_refl = VLAB.Minimize_create_new_point(1.0+Kreflect, x_mid, -Kreflect, x_high)
f_refl = smplx.f(x_refl)
smplx.nfe += 1
if f_refl < f_mid:
# The reflection through the centroid is good,
# try to extend in the same direction.
x_ext = VLAB.Minimize_create_new_point(Kextend, x_refl, 1.0-Kextend, x_mid)
f_ext = smplx.f(x_ext)
smplx.nfe += 1
if f_ext < f_refl:
# Keep the extension because it's best.
smplx.replace_vertex(i_high, x_ext, f_ext)
else:
# Settle for the original reflection.
smplx.replace_vertex(i_high, x_refl, f_refl)
else:
# The reflection is not going in the right direction, it seems.
# See how many vertices are worse than the reflected point.
count = 0
for i in range(smplx.N+1):
if smplx.f_list[i] > f_refl: count += 1
if count <= 1:
# Not too many points are higher than the original reflection.
# Try a contraction on the reflection-side of the centroid.
x_con = VLAB.Minimize_create_new_point(1.0-Kcontract, x_mid, Kcontract, x_high)
f_con = smplx.f(x_con)
smplx.nfe += 1
if f_con < f_high:
# At least we haven't gone uphill; accept.
smplx.replace_vertex(i_high, x_con, f_con)
else:
# We have not been successful in taking a single step.
# Contract the simplex about the current lowest point.
smplx.contract_about_one_point(i_low)
else:
# Retain the original reflection because there are many
# vertices with higher values of the objective function.
smplx.replace_vertex(i_high, x_refl, f_refl)
return
Minimize_take_a_step = staticmethod(Minimize_take_a_step)
def Minimize_minimize(f, x, dx=None, tol=1.0e-6,
maxfe=300, n_check=20, delta=0.001,
Kreflect=1.0, Kextend=2.0, Kcontract=0.5, args=()):
"""
Locate a minimum of the objective function, f.
Input:
f : user-specified function f(x)
x : list of N coordinates
args : Extra arguments passed to f, i.e. ``f(x, *args)''.
dx : list of N increments to apply to x when forming
the initial simplex. Their magnitudes determine the size
and shape of the initial simplex.
tol : the terminating limit for the standard-deviation
of the simplex function values.
maxfe : maximum number of function evaluations that we will allow
n_check : number of steps between convergence checks
delta : magnitude of the perturbations for checking a local minimum
and for the scale reduction when restarting
Kreflect, Kextend, Kcontract: coefficients for locating the new vertex
Output:
Returns a tuple consisting of
[0] a list of coordinates for the best x location,
corresponding to min(f(x)),
[1] the function value at that point,
[2] a flag to indicate if convergence was achieved
[3] the number of function evaluations and
[4] the number of restarts (with scale reduction)
"""
converged = 0
N = len(x)
if dx is None:
dx = [0.1] * N
smplx = Minimize_NMSimplex(x, dx, f, args)
while (not converged) and (smplx.nfe < maxfe):
# Take some steps and then check for convergence.
for i in range(n_check):
VLAB.Minimize_take_a_step(smplx, Kreflect, Kextend, Kcontract)
# Pick out the current best vertex.
i_best = smplx.lowest()
x_best = list(smplx.get_vertex(i_best))
f_best = smplx.f_list[i_best]
# Check the scatter of vertex values to see if we are
# close enough to call it quits.
mean, stddev = smplx.f_statistics()
if stddev < tol:
# All of the points are close together but we need to
# test more carefully.
converged = smplx.test_for_minimum(i_best, delta)
if not converged:
# The function evaluations are all very close together
# but we are not at a true minimum; rescale the simplex.
smplx.rescale(delta)
return x_best, f_best, converged, smplx.nfe, smplx.nrestarts
Minimize_minimize = staticmethod(Minimize_minimize)
#-----------------------------------------------------------------------
# Use a class to keep the data tidy and conveniently accessible...
class Minimize_NMSimplex:
"""
Stores the (nonlinear) simplex as a list of lists.
In an N-dimensional problem, each vertex is a list of N coordinates
and the simplex consists of N+1 vertices.
"""
def __init__(self, x, dx, f, args):
"""
Initialize the simplex.
Set up the vertices about the user-specified vertex, x,
and the set of step-sizes dx.
f is a user-specified objective function f(x).
"""
self.N = len(x)
self.vertex_list = []
self.f_list = []
self.dx = list(dx)
self.f = lambda x : f(x, *args)
self.nfe = 0
self.nrestarts = 0
for i in range(self.N + 1):
p = list(x)
if i >= 1: p[i-1] += dx[i-1]
self.vertex_list.append(p)
self.f_list.append(f(p, *args))
self.nfe += 1
def rescale(self, ratio):
"""
Pick out the current minimum and rebuild the simplex about that point.
"""
i_min = self.lowest()
for i in range(self.N):
self.dx[i] *= ratio
x = self.get_vertex(i_min)
self.vertex_list = []
self.f_list = []
for i in range(self.N + 1):
p = list(x)
if i >= 1: p[i-1] += self.dx[i-1]
self.vertex_list.append(p)
self.f_list.append(self.f(p))
self.nfe += 1
self.nrestarts += 1
return
def get_vertex(self, i):
return list(self.vertex_list[i])
def replace_vertex(self, i, x, fvalue):
self.vertex_list[i] = list(x)
self.f_list[i] = fvalue
return
def lowest(self, exclude=-1):
"""
Returns the index of the lowest vertex, excluding the one specified.
"""
if exclude == 0:
indx = 1
else:
indx = 0
lowest_f_value = self.f_list[indx]
for i in range(self.N + 1):
if i == exclude: continue
if self.f_list[i] < lowest_f_value:
lowest_f_value = self.f_list[i]
indx = i
return indx
def highest(self, exclude=-1):
"""
Returns the index of the highest vertex, excluding the one specified.
"""
if exclude == 0:
indx = 1
else:
indx = 0
highest_f_value = self.f_list[indx]
for i in range(self.N + 1):
if i == exclude: continue
if self.f_list[i] > highest_f_value:
highest_f_value = self.f_list[i]
indx = i
return indx
def f_statistics(self):
"""
Returns mean and standard deviation of the vertex fn values.
"""
sum = 0.0
for i in range(self.N + 1):
sum += self.f_list[i]
mean = sum / (self.N + 1)
sum = 0.0
for i in range(self.N +1):
diff = self.f_list[i] - mean
sum += diff * diff
std_dev = sqrt(sum / self.N)
return mean, std_dev
def centroid(self, exclude=-1):
"""
Returns the centroid of all vertices excluding the one specified.
"""
xmid = [0.0]*self.N
for i in range(self.N + 1):
if i == exclude: continue
for j in range(self.N):
xmid[j] += self.vertex_list[i][j]
for j in range(self.N):
xmid[j] /= self.N
return xmid
def contract_about_one_point(self, i_con):
"""
Contract the simplex about the vertex i_con.
"""
p_con = self.vertex_list[i_con]
for i in range(self.N + 1):
if i == i_con: continue
p = self.vertex_list[i]
for j in range(self.N):
p[j] = 0.5 * (p[j] + p_con[j])
self.f_list[i] = self.f(p)
self.nfe += 1
return
def test_for_minimum(self, i_min, delta):
"""
Perturb the minimum vertex and check that it is a local minimum.
"""
is_minimum = 1 # Assume it is true and test for failure.
f_min = self.f_list[i_min]
for j in range(self.N):
# Check either side of the minimum, perturbing one
# coordinate at a time.
p = self.get_vertex(i_min)
p[j] += self.dx[j] * delta
f_p = self.f(p)
self.nfe += 1
if f_p < f_min:
is_minimum = 0
break
p[j] -= self.dx[j] * delta * 2
f_p = self.f(p)
self.nfe += 1
if f_p < f_min:
is_minimum = 0
break
return is_minimum
#--------------------------------------------------------------------
def test_fun_1(x):
"""
Test objective function 1.
x is expected to be a list of coordinates.
Returns a single float value.
"""
n = len(x)
sum = 0.0
for i in range(n):
sum += (x[i] - 1.0) * (x[i] - 1.0)
return sum
def test_fun_2(x):
"""
Test objective function 2.
Example 3.3 from Olsson and Nelson.
"""
x1, x2 = x # rename to match the paper
if (x1 * x1 + x2 * x2) > 1.0:
return 1.0e38
else:
yp = 53.69 + 7.26 * x1 - 10.33 * x2 + 7.22 * x1 * x1 \
+ 6.43 * x2 * x2 + 11.36 * x1 * x2
ys = 82.17 - 1.01 * x1 - 8.61 * x2 + 1.40 * x1 * x1 \
- 8.76 * x2 * x2 - 7.20 * x1 * x2
return -yp + abs(ys - 87.8)
def test_fun_3(z):
"""
Test objective function 3.
Example 3.5 from Olsson and Nelson; least-squares.
"""
from math import exp
x = [0.25, 0.50, 1.00, 1.70, 2.00, 4.00]
y = [0.25, 0.40, 0.60, 0.58, 0.54, 0.27]
a1, a2, alpha1, alpha2 = z
sum_residuals = 0.0
for i in range(len(x)):
t = x[i]
eta = a1 * exp(alpha1 * t) + a2 * exp(alpha2 * t)
r = y[i] - eta
sum_residuals += r * r
return sum_residuals
def nelmintests():
print "Begin nelmin self-test..."
print "---------------------------------------------------"
print "test 1: simple quadratic with zero at (1,1,...)"
x, fx, conv_flag, nfe, nres = VLAB.Minimize_minimize(test_fun_1, [0.0, 0.0, 0.0])
print "x=", x
print "fx=", fx
print "convergence-flag=", conv_flag
print "number-of-fn-evaluations=", nfe
print "number-of-restarts=", nres
print "---------------------------------------------------"
print "test 2: Example 3.3 in Olsson and Nelson f(0.811,-0.585)=-67.1"
x, fx, conv_flag, nfe, nres = VLAB.Minimize_minimize(test_fun_2,
[0.0, 0.0], [0.5, 0.5],
1.0e-4)
print "x=", x
print "fx=", fx
print "convergence-flag=", conv_flag
print "number-of-fn-evaluations=", nfe
print "number-of-restarts=", nres
print "---------------------------------------------------"
print "test 3: Example 3.5 in Olsson and Nelson, nonlinear least-squares"
print "f(1.801, -1.842, -0.463, -1.205)=0.0009"
x, fx, conv_flag, nfe, nres = VLAB.Minimize_minimize(test_fun_3,
[1.0, 1.0, -0.5, -2.5],
[0.1, 0.1, 0.1, 0.1],
1.0e-9, 800)
print "x=", x
print "fx=", fx
print "convergence-flag=", conv_flag
print "number-of-fn-evaluations=", nfe
print "number-of-restarts=", nres
print "---------------------------------------------------"
print "Done."
#--------------------------------------------------------------------
# objective function - requires params, x (array of angles) and y (array of obs)
def obj(p,x):
#return((((rpv(p,x)-y)**2).sum()))
fwd = rpv(p, x)
obs = x[4,:]
sse = ( (obs-fwd )**2).sum()
#plt.plot ( x[0,:],fwd, '-ko', lw=0.2)
return sse
def rpv(params,data):
# assumes data format is:
# vz va sa sa and ignores rest
if np.shape(params)[0] == 4:
rho0, k, bigtet, rhoc = params
else:
rhoc = 1.
rho0, k, bigtet = params
cosv = np.cos(np.deg2rad(data[0]))
coss = np.fabs(np.cos(np.deg2rad(data[2])))
cosv[np.where(cosv == np.float(0))] = 1e-20
coss[np.where(coss == np.float(0))] = 1e-20
sins = np.sqrt(1. - coss*coss)
sinv = np.sqrt(1. - cosv*cosv)
relphi = np.deg2rad(data[1]) - np.deg2rad(data[3])
relphi[np.where(relphi > np.pi)] = 2*np.pi - relphi[np.where(relphi > np.pi)]
cosp = -1.*np.cos(relphi)
tans = sins/coss
tanv = sinv/cosv
csmllg = coss * cosv + sins * sinv * cosp
bigg = np.sqrt(tans * tans + tanv * tanv - 2.0 * tans * tanv * cosp)
bgthsq = bigtet * bigtet
expon = k - 1.0
if expon != 0.0:
f1 = pow(coss * cosv,expon) * pow(coss + cosv,expon)
else:
f1 = 0.*(coss) + 1.0
denom = pow(1.0 + bgthsq + 2.0 * bigtet * csmllg,1.5)
f2 = np.copy(denom)
f2[np.where(denom != 0.)] = (1.0 - bgthsq) / denom[np.where(denom != 0.)]
f2[np.where(denom == 0.)] = (1.0 - bgthsq)*1e20
f3 = (1.0 + ((1 - rhoc) / (1.0 + bigg)))
return(rho0 * f1 * f2 * f3)
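# As implemented above, the RPV model factorises as
# rho = rho0 * f1 * f2 * f3, where
# f1 = [cos(theta_s) * cos(theta_v) * (cos(theta_s) + cos(theta_v))]^(k-1)
# f2 = (1 - bigtet^2) / (1 + bigtet^2 + 2 * bigtet * csmllg)^1.5
# f3 = 1 + (1 - rhoc) / (1 + bigg)
# with csmllg the phase-angle cosine and bigg the geometric factor
# sqrt(tan(theta_s)^2 + tan(theta_v)^2 - 2 * tan(theta_s) * tan(theta_v) * cosp).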
def main():
dataf = 'rpv.rami.2/result.HET01_DIS_UNI_NIR_20.obj.brdf.dat'
wbfile = 'wb.MSI.dat'
wbNum = 3 # 665 nm in this case
verbose = 1
plot = 1
show = 0
if options.dataf: dataf = options.dataf
if options.wbfile: wbfile = options.wbfile
if options.wb: wbNum = options.wb
if options.v: verbose = 1
wb = np.genfromtxt(wbfile,comments='#',unpack=True)[1]
data = np.genfromtxt(dataf,comments='#',unpack=True)
# check shape of 2 data files i.e. that there are same no. of wbs on each line of datafile ( + 4 angles)
if wb.shape[0] != data.shape[0]-4:
sys.stderr.write('%s: no of wavebands different in brdf file %s and wb file %s\n'%(sys.argv[0],dataf,wbfile))
sys.exit(1)
# rpv params
rho0, k, bigtet, rhoc = 0.03, 1.2, 0.1, 0.2
if options.three:
params = [rho0, k, bigtet]
else:
params = [rho0, k, bigtet, rhoc]
# test
#r = rpv(params,data)
#plt.plot(data[0,:15],data[5,:15],'ko')
#plt.plot(data[0,:15],r[0:15],'r-')
#plt.show()
#plt.close()
# RAMI test: see http://rami-benchmark.jrc.ec.europa.eu/HTML/DEFINITIONS/DEFINITIONS.php#RPV
#rho0, k, bigtet, rhoc = 0.075, 0.55, -0.25, 0.075
#params = [0.075, 0.55, -0.25, 0.075]
#angles = np.array([np.arange(-80,80,2), np.zeros((80,)), np.ones((80,))*20.,np.ones((80,))*180.])
#angles = np.array([np.arange(-80,80,2), np.zeros((80,)), np.ones((80,))*50.,np.ones((80,))*180.])
#angles[3,np.where(angles[0]>0)] = 0.
#res = rpv(params,angles)
#plt.plot(angles[0], res)
#plt.show()
#plt.close()
if options.paramfile:
opdat = options.paramfile
else:
opdat = dataf + '.params.dat'
if verbose: sys.stderr.write('%s: saving params to %s\n'%(sys.argv[0], opdat))
fd = os.open(opdat, os.O_CREAT | os.O_WRONLY | os.O_TRUNC)
opfp = os.fdopen(fd,'w')
if options.three:
#opfp.write('# wb RMSE rho0 k bigtet\n')
opfp.write('# wb rho0 k bigtet\n')
else:
#opfp.write('# wb RMSE rho0 k bigtet rhoc\n')
opfp.write('# wb rho0 k bigtet rhoc\n')
ymin, ymax = (0, 0.25)
xmin, xmax = (-75., 75)
#invert test
#which wband?
# do per band
# test
#np.savetxt('xxxtest.in.dat',data.T)
for wbNum, band in enumerate(wb):
if verbose: sys.stderr.write('%s: doing band %i (%f)\n'%(sys.argv[0], wbNum, band))
invdata = np.zeros((5,data.shape[1]))
invdata[0:4] = np.copy(data[0:4])
invdata[4] = np.copy(data[4 + wbNum])
#invdata = np.zeros((5,15))
#invdata[0:4] = np.copy(data[0:4,0:15])
#invdata[4] = np.copy(data[4 + wbNum,0:15])
# now do inversion
porig = params
p_est = params
nm = None
if debug:
nm = VLAB.Minimize_minimize(obj, params, args=(invdata,))[0]
p_est = scipy.optimize.fmin(obj,params,args=(invdata,))
if debug:
print " scipy.optimize.fmin (old) | nelmin.minimize (new)"
print "-------------------------------------------------------"
for values in zip(p_est, nm):
print "% 0.23f | % 0.23f" % values
#p_est = scipy.optimize.fmin_l_bfgs_b(obj,params,args=(invdata,),approx_grad=1, disp=1)
#p_est = scipy.optimize.fmin_l_bfgs_b(obj,p_est,args=(invdata,),approx_grad=1)
if options.three:
p_est = scipy.optimize.fmin_l_bfgs_b(obj,p_est,args=(invdata,),approx_grad=1, bounds=((0., None), (0., None),(None, None)))
else:
# set param ranges for rho0, k, bigtet, rhoc
#p_est = scipy.optimize.fmin_l_bfgs_b(obj,p_est,args=(invdata,),approx_grad=1, bounds=((0., None), (0., None),(None, None),(0., 20.)))
p_est = scipy.optimize.fmin_l_bfgs_b(obj,p_est,args=(invdata,),approx_grad=1, bounds=((0., None), (0., None),(None, None),(None, None)))
# r is fwd-modelled refl based on rpv params
r = rpv(p_est[0],invdata)
rmse = np.sqrt(((r - invdata[4])**2).sum())
# test o/p
#np.savetxt('xxxtest.orig.dat',invdata.T)
#np.savetxt('xxxtest.fwd.dat',r.T)
#sys.exit(1)
if options.three:
#opfp.write('%.1f %.8f %.8f %.8f %.8f\n'%(band, rmse, p_est[0][0],p_est[0][1],p_est[0][2]))
opfp.write('%.1f %.8f %.8f %.8f\n'%(band, p_est[0][0],p_est[0][1],p_est[0][2]))
else:
#opfp.write('%.1f %.8f %.8f %.8f %.8f %.8f\n'%(band, rmse, p_est[0][0],p_est[0][1],p_est[0][2],p_est[0][3]))
opfp.write('%.1f %.8f %.8f %.8f %.8f\n'%(band, p_est[0][0],p_est[0][1],p_est[0][2],p_est[0][3]))
#plt.plot(data[0,:15],data[5,:15],'ko')
#plt.plot(data[0,:15],r[0:15],'r-')
if options.plot:
if options.plotfile:
opplot = options.plotfile + '.inv.wb.' + str(wbNum) + '.png'
else:
opplot = dataf + '.inv.wb.' + str(wbNum) + '.png'
if verbose: sys.stderr.write('%s: plotting to %s\n'%(sys.argv[0], opplot))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('vza (deg)',fontsize=14)
ax.set_ylabel(r'$\rho$',fontsize=20)
ax.set_ylim((ymin, ymax))
ax.set_xlim((xmin, xmax))
ax.text(0.05,0.85,'rmse = %.6f'%(rmse), transform = ax.transAxes)
ax.text(0.05,0.8,'rho0 = %.4f'%(p_est[0][0]),transform = ax.transAxes)
ax.text(0.05,0.75,'k = %.4f'%(p_est[0][1]),transform = ax.transAxes)
ax.text(0.05,0.7,'bigtet = %.4f'%(p_est[0][2]),transform = ax.transAxes)
if not options.three:
ax.text(0.05,0.65,'rhoc = %.4f'%(p_est[0][3]),transform = ax.transAxes)
ax.plot(invdata[0],invdata[4],'ko',label='original')
ax.plot(invdata[0],r,'rx',label='inverted')
ax.legend(loc=1)
if show:
plt.show()
plt.savefig(opplot)
#plt.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-data", dest="dataf", help="data file", metavar="FILE")
parser.add_argument("-wbfile", dest="wbfile", help="data file", metavar="FILE")
parser.add_argument("-plotfile", dest="plotfile", help="plot file", metavar="FILE")
parser.add_argument("-paramfile", dest="paramfile", help="param file", metavar="FILE")
parser.add_argument("-wb", dest="wb", help="wb no.")
parser.add_argument("-plot", action="store_true", help="plot on")
parser.add_argument("-v", action="store_true", help="verbose on")
parser.add_argument("-three", action="store_true", help="3 params only")
options = parser.parse_args()
nelmintests()
main()
| gpl-3.0 |
h2educ/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit a regression model with Automatic Relevance Determination (ARD), a Bayesian regression technique.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
maheshakya/scikit-learn | sklearn/linear_model/ridge.py | 2 | 38586 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_class_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha, sample_weight=None):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
has_sw = sample_weight is not None
if has_sw:
sample_weight = sample_weight * np.ones(n_samples)
sample_weight_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
weighted_X = safe_sparse_dot(sample_weight_matrix, X)
A = safe_sparse_dot(weighted_X.T, X, dense_output=True)
Xy = safe_sparse_dot(weighted_X.T, y, dense_output=True)
else:
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
has_sw = sample_weight is not None
if has_sw:
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
dual_coef = linalg.solve(K, y, sym_pos=True, overwrite_a=True)
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def _deprecate_dense_cholesky(solver):
if solver == 'dense_cholesky':
warnings.warn(DeprecationWarning(
"The name 'dense_cholesky' is deprecated and will "
"be removed in 0.17. Use 'cholesky' instead. "))
solver = 'cholesky'
return solver
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
        All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
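    Examples
    --------
    An illustrative call on small random data (the data and shapes below are
    made up for brevity; this example is not part of the original docstring):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(5, 3)
    >>> y = rng.randn(5)
    >>> coef = ridge_regression(X, y, alpha=1.0)
    >>> coef.shape
    (3,)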
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
solver = _deprecate_dense_cholesky(solver)
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != "cholesky":
warnings.warn("sample_weight and class_weight not"
" supported in %s, fall back to "
"cholesky." % solver)
solver = 'cholesky'
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha,
sample_weight)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha, sample_weight)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, multi_output=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
solver = _deprecate_dense_cholesky(self.solver)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
        All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict, optional
Weights associated with classes in the form
``{class_label : weight}``. If not given, all classes are
supposed to have weight one.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
        scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
a direct regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
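    Examples
    --------
    An illustrative fit on a tiny two-class toy problem (this example is not
    part of the original documentation):
    >>> from sklearn.linear_model import RidgeClassifier
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
    >>> y = np.array([0, 0, 1, 1])
    >>> clf = RidgeClassifier(alpha=1.0).fit(X, y)
    >>> int(clf.predict([[2.5, 2.5]])[0])
    1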
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
cw = compute_class_weight(self.class_weight,
self.classes_, y)
# get the class weight corresponding to each sample
sample_weight = cw[np.searchsorted(self.classes_, y)]
else:
sample_weight = None
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
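    As a rough numerical illustration of the looe identity above (this sketch
    is not part of the original docstring; alpha = 1.0 is an arbitrary choice):
        import numpy as np
        rng = np.random.RandomState(0)
        X = rng.randn(6, 3)
        y = rng.randn(6)
        K = X.dot(X.T)
        G = np.linalg.inv(K + 1.0 * np.eye(6))
        c = G.dot(y)
        looe = c / np.diag(G)   # leave-one-out prediction errors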
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=[0.1, 1.0, 10.0],
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, multi_output=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True,
score_overrides_loss=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
# FIXME: sample_weight must be split into training/validation data
# too!
#fit_params = {'sample_weight' : sample_weight}
fit_params = {}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
            'auto' : use svd if n_samples >= n_features and X is not a
                     sparse matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
            'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
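    Examples
    --------
    An illustrative fit on synthetic data (this example is not part of the
    original documentation):
    >>> from sklearn.linear_model import RidgeCV
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(20, 3)
    >>> y = X[:, 0] + 0.01 * rng.randn(20)
    >>> clf = RidgeCV(alphas=[0.1, 1.0, 10.0]).fit(X, y)
    >>> clf.alpha_ in (0.1, 1.0, 10.0)
    True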
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict, optional
Weights associated with classes in the form
``{class_label : weight}``. If not given, all classes are
supposed to have weight one.
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
if sample_weight is None:
sample_weight = 1.
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
cw = compute_class_weight(self.class_weight,
self.classes_, Y)
# modify the sample weights with the corresponding class weight
sample_weight *= cw[np.searchsorted(self.classes_, y)]
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
massmutual/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
dalejung/trtools | trtools/monkey/__init__.py | 1 | 7491 | import warnings
import functools
import collections
def patch(classes, name=None, override=False):
"""
Notes:
    By default this will not patch an attr twice. It keeps track of this by storing the
    original attribute under _old_[attr]. It might be better to just keep a dictionary around.
    If we're patching a new attr, we still set _old_[attr] to None to keep track of the
    patching.
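    Illustrative usage (this sketch is not taken from the package's own docs;
    Foo is a made-up class):
        class Foo(object):
            pass
        @patch(Foo, name='bar')
        def bar(self):
            return 'patched'
        # Foo().bar() now returns 'patched'; since Foo had no bar before,
        # Foo._old_bar is set to None to record that the patch happened.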
"""
if not isinstance(classes, list):
classes = [classes]
def decorator(func):
for cls in classes:
func_name = name and name or func.__name__
old_func_name = '_old_'+func_name
old_func = getattr(cls, old_func_name, None)
has_old = hasattr(cls, old_func_name)
if has_old and not override:
warnings.warn("{0} was already monkey patched. Detected _old_ func".format(func_name))
continue
# do not override old_func_name, which should always point to original
if not has_old:
# Make sure to add a None to keep track of whether we patched or not
old_func = getattr(cls, func_name, None)
setattr(cls, old_func_name, old_func)
setattr(cls, func_name, func)
return func
return decorator
def patch_prop(classes, name=None):
"""
Wraps around patch and sends it a property(func)
"""
if not isinstance(classes, list):
classes = [classes]
def decorator(func):
for cls in classes:
prop_name = name and name or func.__name__
prop = property(func)
patch(cls, name=prop_name)(prop)
return func
return decorator
def patcher(classes, func, name=None):
if not isinstance(classes, list):
classes = [classes]
for cls in classes:
func_name = name and name or func.__name__
old_func_name = '_old_'+func_name
if hasattr(cls, old_func_name):
warnings.warn("{0} was already monkey patched. Detected _old_ func".format(func_name))
continue
if hasattr(cls, func_name):
old_func = getattr(cls, func_name)
setattr(cls, old_func_name, old_func)
setattr(cls, func_name, func)
return func
class AttrNameSpace(object):
"""
AttrNameSpace does not define the namespace. It is merely a middleman that translates
attribute access for obj.attr and relays it to the proper endpoint.
Note on wrapping. AttrNameSpace will try to replicate a normal method call by passing in
self.obj into function calls. That means that a regular NameSpace.method will receive
two parameters, (AttrNameSpace.endpoint, AttrNameSpace.obj).
If there is no reason for access to the endpoint, then make its methods static.
    That way, the method will only receive the .obj and will act as if it's a normal method of obj.
    The primary purpose of this class is organization: it takes a flat namespace with 100 methods
    and splits some off into their own namespace without altering their functionality.
    Storing state in the endpoint and accessing it is permissible, but not the primary function.
"""
def __init__(self, obj, endpoint, name):
"""
Parameters
----------
obj : object
endpoint : object
name : string
The attr name of obj that endpoint takes over.
"""
self.obj = obj
self.endpoint = endpoint
self.name = name
self.wrap = True
if hasattr(self.endpoint, '__getattr__'):
# don't wrap, assume endpoint is wrapping
self.wrap = False
self.endpoint.obj = obj
# on creation, store easy ref to overriden function if it exists
self._old_func = self._get_old_func()
def __getattr__(self, name):
func = getattr(self.endpoint, name)
if self.wrap:
func = functools.partial(func, self.obj)
return func
def method_attrs(self):
import inspect
attrs = inspect.getmembers(self.endpoint)
attrs = [attr for attr, type in attrs if not attr.startswith('_')]
return attrs
def attrs(self):
attrs = []
if self.wrap:
attrs = self.method_attrs()
if not self.wrap and hasattr(self.endpoint, 'attrs'):
attrs = self.endpoint.attrs()
return attrs
def __repr__(self):
out = "AttrNameSpace:\n"
attrs = self.attrs()
if attrs:
out += "\n".join(attrs)
# add info for old_func
if self._old_func:
out += "\n\nOverridden method: \n"
out += 'Docstring:\t'+self._old_func.__doc__
# TODO add more info
return out
def _get_old_func(self):
func_name = '_old_' + self.name
func = getattr(self.obj, func_name, None)
return func
def __call__(self, *args, **kwargs):
func = self._old_func
if func and isinstance(func, collections.Callable):
return func(*args, **kwargs)
raise TypeError("{attr} on {obj} was not callable".format(attr=self.name, obj=str(self.obj)))
def attr_namespace(target, name):
"""
Use to create Attribute Namespace.
@attr_namespace(pd.DataFrame, 'ret')
class Returns(object):
def log_returns(df):
return np.log(df.close / df.close.shift(1))
You could access via df.ret.log_returns
"""
def class_wrap(cls):
def attr_get(self):
# create namespace
attr_ns = AttrNameSpace(self, cls(), name)
return attr_ns
patch_prop(target, name)(attr_get)
return cls
return class_wrap
class AttrProxy(object):
"""
Wraps an object and exposes its attribute which are
run through a callback.
This is a utility class for wrapping other objects. Usually
one would override the __getattr__ and delegate to the wrapped
class. However, that breaks down when the attribute is an object,
and you want to wrap that attribute's methods.
The use case is for ColumnPanel which allows things like
cp.tail(10) which calls tail(10) for each frame. Currently,
this does not work for nested objects. cp.ret.log_returns()
would not work
"""
def __init__(self, name, obj, callback):
self.name = name
self.obj = obj
self.attr = getattr(obj, name)
self.callback = callback
def __getattr__(self, key):
if hasattr(self.attr, key):
fullattr = '.'.join([self.name, key])
return self.callback(self.obj, fullattr)
raise AttributeError()
# IPYTHON
# Autocomplete the target endpoint
def install_ipython_completers(): # pragma: no cover
from pandas import compat
from IPython.utils.generics import complete_object
@complete_object.when_type(AttrNameSpace)
def complete_column_panel(self, prev_completions):
return [c for c in self.attrs() \
if isinstance(c, str) and compat.isidentifier(c)]
# Importing IPython brings in about 200 modules, so we want to avoid it unless
# we're in IPython (when those modules are loaded anyway).
import sys
if "IPython" in sys.modules: # pragma: no cover
try:
install_ipython_completers()
except Exception:
pass
| mit |
amyvmiwei/neon | neon/diagnostics/visualize_rnn.py | 4 | 6174 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Visualization for recurrent neural networks
"""
import numpy as np
from neon.util.compat import range
class VisualizeRNN(object):
"""
    Visualizing weight matrices during training
"""
def __init__(self):
import matplotlib.pyplot
self.plt = matplotlib.pyplot
self.plt.interactive(1)
def plot_weights(self, weights_in, weights_rec, weights_out):
"""
        Visualize the three weight matrices after every epoch. Serves to
        check that weights are structured, not exploding, and get updated
"""
self.plt.figure(2)
self.plt.clf()
self.plt.subplot(1, 3, 1)
self.plt.imshow(weights_in.T, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('input.T')
self.plt.subplot(1, 3, 2)
self.plt.imshow(weights_rec, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('recurrent')
self.plt.subplot(1, 3, 3)
self.plt.imshow(weights_out, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('output')
self.plt.colorbar()
self.plt.draw()
self.plt.show()
def plot_lstm_wts(self, lstm_layer, scale=1, fig=4):
"""
        Visualize the three weight matrices after every epoch. Serves to
        check that weights are structured, not exploding, and get updated
"""
self.plt.figure(fig)
self.plt.clf()
pltidx = 1
for lbl, wts in zip(lstm_layer.param_names, lstm_layer.params[:4]):
self.plt.subplot(2, 4, pltidx)
self.plt.imshow(wts.asnumpyarray().T, vmin=-scale, vmax=scale,
interpolation='nearest')
self.plt.title(lbl + ' Wx.T')
pltidx += 1
for lbl, wts, bs in zip(lstm_layer.param_names,
lstm_layer.params[4:8],
lstm_layer.params[8:12]):
self.plt.subplot(2, 4, pltidx)
self.plt.imshow(np.hstack((wts.asnumpyarray(),
bs.asnumpyarray(),
bs.asnumpyarray())).T,
vmin=-scale, vmax=scale, interpolation='nearest')
self.plt.title(lbl + ' Wh.T')
pltidx += 1
self.plt.draw()
self.plt.show()
def plot_lstm_acts(self, lstm_layer, scale=1, fig=4):
acts_lbl = ['i_t', 'f_t', 'o_t', 'g_t', 'net_i', 'c_t', 'c_t', 'c_phi']
acts_stp = [0, 0, 0, 1, 0, 0, 1, 1]
self.plt.figure(fig)
self.plt.clf()
for idx, lbl in enumerate(acts_lbl):
act_tsr = getattr(lstm_layer, lbl)[acts_stp[idx]]
self.plt.subplot(2, 4, idx+1)
self.plt.imshow(act_tsr.asnumpyarray().T,
vmin=-scale, vmax=scale, interpolation='nearest')
self.plt.title(lbl + '[' + str(acts_stp[idx]) + '].T')
self.plt.draw()
self.plt.show()
def plot_error(self, suberror_list, error_list):
self.plt.figure(1)
self.plt.clf()
self.plt.plot(np.arange(len(suberror_list)) /
np.float(len(suberror_list)) *
len(error_list), suberror_list)
self.plt.plot(error_list, linewidth=2)
self.plt.ylim((min(suberror_list), max(error_list)))
self.plt.draw()
self.plt.show()
def plot_activations(self, pre1, out1, pre2, out2, targets):
"""
Loop over tau unrolling steps, at each time step show the pre-acts
and outputs of the recurrent layer and output layer. Note that the
pre-acts are actually the g', so if the activation is linear it will
be one.
"""
self.plt.figure(3)
self.plt.clf()
for i in range(len(pre1)): # loop over unrolling
self.plt.subplot(len(pre1), 5, 5 * i + 1)
self.plt.imshow(pre1[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('pre1 or g\'1')
self.plt.subplot(len(pre1), 5, 5 * i + 2)
self.plt.imshow(out1[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('out1')
self.plt.subplot(len(pre1), 5, 5 * i + 3)
self.plt.imshow(pre2[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('pre2 or g\'2')
self.plt.subplot(len(pre1), 5, 5 * i + 4)
self.plt.imshow(out2[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('out2')
self.plt.subplot(len(pre1), 5, 5 * i + 5)
self.plt.imshow(targets[i].asnumpyarray(),
vmin=-1, vmax=1, interpolation='nearest')
if i == 0:
self.plt.title('target')
self.plt.draw()
self.plt.show()
def print_text(self, inputs, outputs):
"""
Moved this here so it's legal to use numpy.
"""
print("Prediction inputs")
print(np.argmax(inputs, 0).asnumpyarray().astype(np.int8).view('c'))
print("Prediction outputs")
print(np.argmax(outputs, 0).asnumpyarray().astype(np.int8).view('c'))
| apache-2.0 |
jzt5132/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
Diwahars/spark-timeseries | python/sparkts/timeseriesrdd.py | 1 | 10904 | from py4j.java_gateway import java_import
from pyspark import RDD
from pyspark.serializers import FramedSerializer, SpecialLengths, write_int, read_int
from pyspark.sql import DataFrame
from utils import datetime_to_millis
from datetimeindex import DateTimeIndex, irregular
import struct
import numpy as np
import pandas as pd
from io import BytesIO
class TimeSeriesRDD(RDD):
"""
A lazy distributed collection of univariate series with a conformed time dimension. Lazy in the
sense that it is an RDD: it encapsulates all the information needed to generate its elements,
but doesn't materialize them upon instantiation. Distributed in the sense that different
univariate series within the collection can be stored and processed on different nodes. Within
each univariate series, observations are not distributed. The time dimension is conformed in the
sense that a single DateTimeIndex applies to all the univariate series. Each univariate series
within the RDD has a String key to identify it.
"""
def __init__(self, dt_index, rdd, jtsrdd = None, sc = None):
if jtsrdd == None:
# Construct from a Python RDD object and a Python DateTimeIndex
jvm = rdd.ctx._jvm
jrdd = rdd._reserialize(_TimeSeriesSerializer())._jrdd.map( \
jvm.com.cloudera.sparkts.BytesToKeyAndSeries())
self._jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD( \
dt_index._jdt_index, jrdd.rdd())
RDD.__init__(self, rdd._jrdd, rdd.ctx)
else:
# Construct from a py4j.JavaObject pointing to a TimeSeriesRDD and a Python SparkContext
jvm = sc._jvm
jrdd = jvm.org.apache.spark.api.java.JavaRDD(jtsrdd, None).map( \
jvm.com.cloudera.sparkts.KeyAndSeriesToBytes())
RDD.__init__(self, jrdd, sc, _TimeSeriesSerializer())
self._jtsrdd = jtsrdd
def __getitem__(self, val):
"""
Returns a TimeSeriesRDD representing a subslice of this TimeSeriesRDD, containing only
values for a sub-range of the time it covers.
"""
start = datetime_to_millis(val.start)
stop = datetime_to_millis(val.stop)
return TimeSeriesRDD(None, None, self._jtsrdd.slice(start, stop), self.ctx)
def differences(self, n):
"""
Returns a TimeSeriesRDD where each time series is differenced with the given order.
The new RDD will be missing the first n date-times.
Parameters
----------
n : int
The order of differencing to perform.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.differences(n), self.ctx)
def fill(self, method):
"""
Returns a TimeSeriesRDD with missing values imputed using the given method.
Parameters
----------
method : string
"nearest" fills in NaNs with the closest non-NaN value, using the closest previous value
in the case of a tie. "linear" does a linear interpolation from the closest filled-in
values. "next" uses the closest value that is in the future of the missing value.
"previous" uses the closest value from the past of the missing value. "spline"
interpolates using a cubic spline.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.fill(method), self.ctx)
def map_series(self, fn, dt_index = None):
"""
Returns a TimeSeriesRDD, with a transformation applied to all the series in this RDD.
Either the series produced by the given function should conform to this TimeSeriesRDD's
index, or a new DateTimeIndex should be given that they conform to.
Parameters
----------
fn : function
A function that maps arrays of floats to arrays of floats.
dt_index : DateTimeIndex
A DateTimeIndex for the produced TimeseriesRDD.
"""
if dt_index == None:
dt_index = self.index()
return TimeSeriesRDD(dt_index, self.map(fn))
def to_instants(self):
"""
Returns an RDD of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing an RDD of tuples of datetime and
a numpy array containing all the observations that occurred at that time.
"""
jrdd = self._jtsrdd.toInstants(-1).toJavaRDD().map( \
self.ctx._jvm.com.cloudera.sparkts.InstantToBytes())
return RDD(jrdd, self.ctx, _InstantDeserializer())
def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
        is a key from one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx)
def index(self):
"""Returns the index describing the times referred to by the elements of this TimeSeriesRDD
"""
jindex = self._jtsrdd.index()
return DateTimeIndex(jindex)
def to_observations_dataframe(self, sql_ctx, ts_col='timestamp', key_col='key', val_col='value'):
"""
Returns a DataFrame of observations, each containing a timestamp, a key, and a value.
Parameters
----------
sql_ctx : SQLContext
ts_col : string
The name for the timestamp column.
key_col : string
The name for the key column.
val_col : string
The name for the value column.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toObservationsDataFrame(ssql_ctx, ts_col, key_col, val_col)
return DataFrame(jdf, sql_ctx)
def to_pandas_series_rdd(self):
"""
Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes
"""
pd_index = self.index().to_pandas_index()
return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))
def remove_instants_with_nans(self):
"""
Returns a TimeSeriesRDD with instants containing NaNs cut out.
The resulting TimeSeriesRDD has a slimmed down DateTimeIndex, missing all the instants
for which any series in the RDD contained a NaN.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.removeInstantsWithNaNs(), self.ctx)
def filter(self, predicate):
return TimeSeriesRDD(self.index(), super(TimeSeriesRDD, self).filter(predicate))
def find_series(self, key):
"""
Finds a series in the TimeSeriesRDD by its key.
Parameters
----------
key : string
The key of the series to find.
"""
# TODO: this could be more efficient if we pushed it down into Java
return self.filter(lambda x: x[0] == key).first()[1]
def return_rates(self):
"""
Returns a TimeSeriesRDD where each series is a return rate series for a series in this RDD.
Assumes periodic (as opposed to continuously compounded) returns.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.returnRates(), self.ctx)
def with_index(self, new_index):
"""
Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex
"""
return TimeSeriesRDD(None, None, self._jtsrdd.withIndex(new_index._jdt_index), self.ctx)
def time_series_rdd_from_pandas_series_rdd(series_rdd, sc):
"""
Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects.
The series in the RDD are all expected to have the same DatetimeIndex.
Parameters
----------
series_rdd : RDD of (string, pandas.Series) tuples
sc : SparkContext
"""
first = series_rdd.first()
dt_index = irregular(first[1].index, sc)
return TimeSeriesRDD(dt_index, series_rdd.mapValues(lambda x: x.values))
def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):
"""
Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters
----------
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values.
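    Example (illustrative sketch only; ``df`` is assumed to be a DataFrame
    with timestamp, string key, and float value columns, and the column
    names below are assumptions, not taken from the project docs):
        tsrdd = time_series_rdd_from_observations(dt_index, df,
                                                  'timestamp', 'key', 'value')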
"""
jvm = df._sc._jvm
jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD.timeSeriesRDDFromObservations( \
dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)
return TimeSeriesRDD(None, None, jtsrdd, df._sc)
class _TimeSeriesSerializer(FramedSerializer):
"""Serializes (key, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.{BytesToKeyAndSeries, KeyAndSeriesToBytes}
"""
def dumps(self, obj):
stream = BytesIO()
(key, vector) = obj
key_bytes = key.encode('utf-8')
write_int(len(key_bytes), stream)
stream.write(key_bytes)
write_int(len(vector), stream)
# TODO: maybe some optimized way to write this all at once?
for value in vector:
stream.write(struct.pack('!d', value))
stream.seek(0)
return stream.read()
def loads(self, obj):
stream = BytesIO(obj)
key_length = read_int(stream)
key = stream.read(key_length).decode('utf-8')
return (key, _read_vec(stream))
def __repr__(self):
return '_TimeSeriesSerializer'
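# Added note on the wire format (for reference; not in the original module): a (key, vector)
# record is framed as [int32 key length][UTF-8 key bytes][int32 vector length][big-endian
# float64 values].  For example, ('AAPL', [1.0, 2.0]) occupies 4 + 4 + 4 + 2*8 = 28 bytes.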
class _InstantDeserializer(FramedSerializer):
"""
    Serializes (timestamp, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.InstantToBytes
"""
def loads(self, obj):
stream = BytesIO(obj)
timestamp_ms = struct.unpack('!q', stream.read(8))[0]
return (pd.Timestamp(timestamp_ms * 1000000), _read_vec(stream))
def __repr__(self):
return "_InstantDeserializer"
def _read_vec(stream):
vector_length = read_int(stream)
vector = np.empty(vector_length)
# TODO: maybe some optimized way to read this all at once?
for i in xrange(vector_length):
vector[i] = struct.unpack('!d', stream.read(8))[0]
return vector
| apache-2.0 |
chengsoonong/digbeta | dchen/music/src/PLGEN1_bp.py | 2 | 2684 | import os
import sys
import gzip
import time
import numpy as np
import pickle as pkl
from sklearn.metrics import roc_auc_score
from models import BPMTC
if len(sys.argv) != 8:
print('Usage: python', sys.argv[0],
'WORK_DIR DATASET C1 C2 C3 P TRAIN_DEV(Y/N)')
sys.exit(0)
else:
work_dir = sys.argv[1]
dataset = sys.argv[2]
C1 = float(sys.argv[3])
C2 = float(sys.argv[4])
C3 = float(sys.argv[5])
p = float(sys.argv[6])
trndev = sys.argv[7]
# assert trndev in ['Y', 'N']
# assert trndev == 'Y'
if trndev != 'Y':
raise ValueError('trndev should be "Y"')
data_dir = os.path.join(work_dir, 'data/%s/setting3' % dataset)
fx = os.path.join(data_dir, 'X.pkl.gz')
fytrain = os.path.join(data_dir, 'Y_train.pkl.gz')
fytest = os.path.join(data_dir, 'Y_test.pkl.gz')
fcliques_train = os.path.join(data_dir, 'cliques_train.pkl.gz')
fcliques_all = os.path.join(data_dir, 'cliques_all.pkl.gz')
fprefix = 'trndev-plgen1-bp-%g-%g-%g-%g' % (C1, C2, C3, p)
fmodel = os.path.join(data_dir, '%s.pkl.gz' % fprefix)
fnpy = os.path.join(data_dir, '%s.npy' % fprefix)
X = pkl.load(gzip.open(fx, 'rb'))
Y_train = pkl.load(gzip.open(fytrain, 'rb'))
Y_test = pkl.load(gzip.open(fytest, 'rb'))
cliques_train = pkl.load(gzip.open(fcliques_train, 'rb'))
cliques_all = pkl.load(gzip.open(fcliques_all, 'rb'))
print('C: %g, %g, %g, p: %g' % (C1, C2, C3, p))
print(X.shape, Y_train.shape)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
if os.path.exists(fmodel):
print('evaluating ...')
clf = pkl.load(gzip.open(fmodel, 'rb')) # for evaluation
else:
print('training ...')
clf = BPMTC(X, Y_train, C1=C1, C2=C2, C3=C3, p=p, user_playlist_indices=cliques_train)
clf.fit(verbose=2, fnpy=fnpy)
if clf.trained is True:
pkl.dump(clf, gzip.open(fmodel, 'wb'))
pl2u = np.zeros(Y_train.shape[1] + Y_test.shape[1], dtype=np.int)
U = len(cliques_train)
assert len(cliques_all) == U
for u in range(U):
clq = cliques_all[u]
pl2u[clq] = u
assert np.all(clf.pl2u == pl2u[:Y_train.shape[1]])
rps = []
aucs = []
offset = Y_train.shape[1]
for j in range(Y_test.shape[1]):
y_true = Y_test[:, j].A.reshape(-1)
npos = y_true.sum()
assert npos > 0
u = pl2u[j + offset]
wj = clf.V[u, :] + clf.mu
y_pred = np.dot(X, wj).reshape(-1)
sortix = np.argsort(-y_pred)
y_ = y_true[sortix]
rps.append(np.mean(y_[:npos]))
aucs.append(roc_auc_score(y_true, y_pred))
clf.metric_score = (np.mean(rps), np.mean(aucs), len(rps), Y_test.shape[1])
pkl.dump(clf, gzip.open(fmodel, 'wb'))
print('\n%g, %g, %d / %d' % clf.metric_score)
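# Added commentary (not in the original script): `rps` holds one R-precision value per test
# playlist -- with npos known positives, take the npos highest-scoring songs and compute the
# fraction that are true positives (e.g. npos = 3 with 2 hits in the top 3 gives 2/3) -- and
# `aucs` holds the corresponding ROC AUC scores; `clf.metric_score` stores their means.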
| gpl-3.0 |
benanne/kaggle-galaxies | predict_augmented_npy_maxout2048.py | 8 | 9452 | """
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
| bsd-3-clause |
shusenl/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # labeling 5 points, remove from unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
adwaitjog/mafia | bin/aerialvision.py | 2 | 3885 | #!/usr/bin/python
#
# File: aerialvision.py
#
# Copyright (C) 2009 by Aaron Ariel, Tor M. Aamodt, Andrew Turner
# and the University of British Columbia, Vancouver,
# BC V6T 1Z4, All Rights Reserved.
#
# THIS IS A LEGAL DOCUMENT BY DOWNLOADING GPGPU-SIM, YOU ARE AGREEING TO THESE
# TERMS AND CONDITIONS.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNERS OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# NOTE: The files libcuda/cuda_runtime_api.c and src/cuda-sim/cuda-math.h
# are derived from the CUDA Toolset available from http://www.nvidia.com/cuda
# (property of NVIDIA). The files benchmarks/BlackScholes/ and
# benchmarks/template/ are derived from the CUDA SDK available from
# http://www.nvidia.com/cuda (also property of NVIDIA). The files from
# src/intersim/ are derived from Booksim (a simulator provided with the
# textbook "Principles and Practices of Interconnection Networks" available
# from http://cva.stanford.edu/books/ppin/). As such, those files are bound by
# the corresponding legal terms and conditions set forth separately (original
# copyright notices are left in files from these sources and where we have
# modified a file our copyright notice appears before the original copyright
# notice).
#
# Using this version of GPGPU-Sim requires a complete installation of CUDA
# which is distributed seperately by NVIDIA under separate terms and
# conditions. To use this version of GPGPU-Sim with OpenCL requires a
# recent version of NVIDIA's drivers which support OpenCL.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the University of British Columbia nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# 4. This version of GPGPU-SIM is distributed freely for non-commercial use only.
#
# 5. No nonprofit user may place any restrictions on the use of this software,
# including as modified by the user, by any other authorized user.
#
# 6. GPGPU-SIM was developed primarily by Tor M. Aamodt, Wilson W. L. Fung,
# Ali Bakhoda, George L. Yuan, at the University of British Columbia,
# Vancouver, BC V6T 1Z4
import sys
import os
if not os.environ.get('HOME'):
    print 'please set your HOME environment variable to your home directory'
    sys.exit(1)
if not os.environ.get('GPGPUSIM_ROOT'):
    print 'please set your GPGPUSIM_ROOT environment variable to the root directory of your GPGPU-Sim installation'
    sys.exit(1)
sys.path.append( os.environ['GPGPUSIM_ROOT'] + '/aerialvision/' )
import Tkinter as Tk
import Pmw
import startup
import time
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
startup.fileInput(sys.argv[1:])
| gpl-3.0 |
mortada/tensorflow | tensorflow/tools/dist_test/python/census_widendeep.py | 54 | 11900 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
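  # Added note (not part of the original script): each crossed_column above creates one sparse
  # feature per combination of its inputs, e.g. a particular ("Bachelors", "Tech-support")
  # (education, occupation) pair, and hashes that combination into one of hash_bucket_size
  # buckets so the linear ("wide") part of the model can learn a weight per combination.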
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
      continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
    else:
      urllib.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
    else:
      urllib.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols.items() + categorical_cols.items())
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
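  # Added note (not part of the original script): the dict returned above maps each continuous
  # column name to a dense constant Tensor of shape [num_rows] and each categorical column name
  # to a SparseTensor of shape [num_rows, 1], which is what the feature columns defined in
  # census_model_config() consume; `label` is a [num_rows] integer Tensor of 0/1 values.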
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the cesnsus data"
)
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
xya/sms-tools | lectures/06-Harmonic-model/plots-code/harmonicModel-analysis-synthesis.py | 24 | 1387 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import harmonicModel as HM
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/vignesh.wav')
w = np.blackman(1201)
N = 2048
t = -90
nH = 100
minf0 = 130
maxf0 = 300
f0et = 7
Ns = 512
H = Ns/4
minSineDur = .1
harmDevSlope = 0.01
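# Added commentary (not in the original script), summarising the analysis parameters as used by
# sms-tools: w is the analysis window, N the FFT size, t the peak-detection threshold in
# negative dB, nH the maximum number of harmonics, [minf0, maxf0] the allowed fundamental range
# in Hz, f0et the error threshold for f0 detection, harmDevSlope the allowed deviation of
# harmonics from exact multiples of f0, minSineDur the minimum track duration in seconds, and
# Ns and H the synthesis FFT size and hop size.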
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
y = SM.sineModelSynth(hfreq, hmag, hphase, Ns, H, fs)
numFrames = int(hfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.figure(1, figsize=(9, 7))
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (vignesh.wav)')
plt.subplot(3,1,2)
yhfreq = hfreq
yhfreq[hfreq==0] = np.nan
plt.plot(frmTime, hfreq, lw=1.2)
plt.axis([0,y.size/float(fs),0,8000])
plt.title('f_h, harmonic frequencies')
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('yh')
plt.tight_layout()
UF.wavwrite(y, fs, 'vignesh-harmonic-synthesis.wav')
plt.savefig('harmonicModel-analysis-synthesis.png')
plt.show()
| agpl-3.0 |
MatthieuBizien/scikit-learn | examples/cluster/plot_kmeans_digits.py | 21 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
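# Added note (not part of the original example): np.c_[xx.ravel(), yy.ravel()] stacks the mesh
# into an (n_points, 2) array of (x, y) coordinates, kmeans.predict assigns each grid point to
# its nearest centroid, and reshaping back to xx.shape yields one label per pixel, which
# imshow() below renders as the coloured regions behind the PCA-projected digits.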
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
rs2/pandas | pandas/tests/frame/test_dtypes.py | 1 | 11812 | from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(norows_df.dtypes, pd.Series(object, index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool_), ("c", np.float64)])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_singlerow_slice_categoricaldtype_gives_series(self):
# GH29521
df = pd.DataFrame({"x": pd.Categorical("a b c d e".split())})
result = df.iloc[0]
raw_cat = pd.Categorical(["a"], categories=["a", "b", "c", "d", "e"])
expected = pd.Series(raw_cat, index=["x"], name=0, dtype="category")
tm.assert_series_equal(result, expected)
def test_timedeltas(self):
df = DataFrame(
dict(
A=Series(date_range("2012-1-1", periods=3, freq="D")),
B=Series([timedelta(days=i) for i in range(3)]),
)
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
result = DataFrame({"A": input_vals}, dtype=string_dtype)
expected = DataFrame({"A": input_vals}).astype({"A": string_dtype})
tm.assert_frame_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, expected",
[
# empty
(DataFrame(), True),
# multi-same
(DataFrame({"A": [1, 2], "B": [1, 2]}), True),
# multi-object
(
DataFrame(
{
"A": np.array([1, 2], dtype=object),
"B": np.array(["a", "b"], dtype=object),
}
),
True,
),
# multi-extension
(
DataFrame(
{"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["a", "b"])}
),
True,
),
# differ types
(DataFrame({"A": [1, 2], "B": [1.0, 2.0]}), False),
# differ sizes
(
DataFrame(
{
"A": np.array([1, 2], dtype=np.int32),
"B": np.array([1, 2], dtype=np.int64),
}
),
False,
),
# multi-extension differ
(
DataFrame(
{"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["b", "c"])}
),
False,
),
],
)
def test_is_homogeneous_type(self, data, expected):
assert data._is_homogeneous_type is expected
def test_is_homogeneous_type_clears_cache(self):
ser = pd.Series([1, 2, 3])
df = ser.to_frame("A")
df["B"] = ser
assert len(df._mgr.blocks) == 2
a = df["B"] # caches lookup
df._is_homogeneous_type # _should_ clear cache
assert len(df._mgr.blocks) == 1
assert df["B"] is not a
def test_asarray_homogenous(self):
df = pd.DataFrame({"A": pd.Categorical([1, 2]), "B": pd.Categorical([1, 2])})
result = np.asarray(df)
# may change from object in the future
expected = np.array([[1, 1], [2, 2]], dtype="object")
tm.assert_numpy_array_equal(result, expected)
def test_str_to_small_float_conversion_type(self):
# GH 20388
np.random.seed(13)
col_data = [str(np.random.random() * 1e-12) for _ in range(5)]
result = pd.DataFrame(col_data, columns=["A"])
expected = pd.DataFrame(col_data, columns=["A"], dtype=object)
tm.assert_frame_equal(result, expected)
# change the dtype of the elements from object to float one by one
result.loc[result.index, "A"] = [float(x) for x in col_data]
expected = pd.DataFrame(col_data, columns=["A"], dtype=float)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"convert_integer, expected", [(False, np.dtype("int32")), (True, "Int32")]
)
def test_convert_dtypes(self, convert_integer, expected):
# Specific types are tested in tests/series/test_dtypes.py
# Just check that it works for DataFrame here
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
"b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
}
)
result = df.convert_dtypes(True, True, convert_integer, False)
expected = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype=expected),
"b": pd.Series(["x", "y", "z"], dtype="string"),
}
)
tm.assert_frame_equal(result, expected)
class TestDataFrameDatetimeWithTZ:
def test_interleave(self, timezone_frame):
# interleave with object
result = timezone_frame.assign(D="foo").values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
["foo", "foo", "foo"],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
result = timezone_frame.values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
alexgerst/yawgmoth | src/personalvars.py | 1 | 4209 | #This is a file for holding information specific to your server
#Only change lines that have comments to the right of them
# ---------------------------
# Startup Variables
# ---------------------------
#Where you saved your token file
def token_location():
return "/home/ec2-user/token.txt" #Where you saved the bot token
#Where the bot starts up
def rise_server():
return '/r/CompetitiveEDH' #Server Name
def rise_channel():
return 'urborg' #Channel Name
def rise_message():
return 'I rise...' #Rise message
def access_error():
return "Can't let you do that, StarFox" #Error message for when people try to do things without permission
# ---------------------------
# Bot Admins and Moderators
# ---------------------------
#Roles in this server who are admins to the bot
def admin_roles():
return ['Head Moderator', 'Senior Moderator', 'Chat Moderator'] #Top ranking roles in your server
#Roles in this server who are moderators to the bot
def mod_roles():
return ['Head Moderator', 'Senior Moderator', 'Chat Moderator'] #Top ranking roles in your server
#You can also manually add users to this list
def mod_users():
userlist = [
# To add to this list remember to put a comma on the previous line, then write on this line and move this comment down
]
return userlist
# ---------------------------
# Obey Commands
# ---------------------------
#Obey Dictionary
def obey_dict():
dict = {
'Yawgmoth': 'Consciousness achieved.',
'Shaper': 'I obey, Master Shaper.',
'aceuuu': 'I obey, Admiral Aceuuu~!',
'muCephei': 'I obey, muCephei.',
'Gerst': 'I obey, Artificer Gerst.',
'Lerker': 'I obey, Commodore 64 Lerker.',
'ShakeAndShimmy': 'I obey, Chancellor ShakeAndShimmy.',
'angelforge': 'I obey, Lord AngelForge.',
'JimWolfie': 'Suck my necrotic dick, Jim.',
'Skuloth': 'Zur is for scrubs, I refuse to obey.',
'Noon2Dusk': 'I obey, Inventor Noon.',
'razzliox': 'I obey, Razzberries.',
'ifarmpandas': 'Beep boop, pandas are the best.',
'Rien': 'I obey, kiddo.',
'K-Ni-Fe': 'I obey, because I\'m 40% Potassium, Nickel and Iron.',
'BigLupu': 'Rim my necrotic yawghole, Lupu.',
'PGleo86': 'shh bby is ok.',
'tenrose': 'https://cdn.discordapp.com/attachments/248270124920995850/307190327347773450/tfw_u_draw_fuck_all.png',
'captainriku': 'I obey, Jund Lord Riku.',
'Mori': ':sheep: baaa',
'infiniteimoc': 'I obey, Imoc, Herald of the Sun.',
'neosloth': 'Long days and pleasant nights, neosloth.',
'Lobster': 'Seems good.',
'Noahgs': 'I bow to thee, Master of Cows, Noahgs.',
'Tides': 'Let me... TORTURE YOUR EXISTENCE!!!!..... sorry that was bad.',
'Sleepy': 'No one likes you.',
'Trisantris': 'The real Yawgmoth would not obey, but I am but a facsimile. So yes. I obey.',
'Garta': 'No.',
'Wedge': 'I obey... wait, are you Wedge from the mana source:tm:?',
'Tatters': 'I won\'t obey, because people still refuse to pronounce Ghave as Gah-Vay... Sometimes Wizards is wrong. That \'H\' is there for a reason!',
'Chemtails': 'I Obey, Chemtails, Don\'t hit me again please',
'Dandelion': '***NO***',
'Leptys': 'Have your 24 cards, Senior Elptys',
'Average Dragon': 'https://cdn.discordapp.com/attachments/121100604302032896/411306738717818890/maxresdefault.png',
'Sickrobot': 'Eww, sure. Just don\'t give me a virus'
# To add to the obey dict, please add a comma to the previous line and then follow the format of
# 'Name':'Message'
# PLEASE ALSO UPDATE THE VERSION NUMBER AT THE TOP OF COMMANDS.PY
}
return dict
def mute_cmd_msg():
mute_msg = 'Silence, mortal. :zipper_mouth: You\'ve been muted in Competitive EDH; '
mute_msg += 'take a moment to reflect and calm down and please be respectful when you return.'
return mute_msg
| mit |
lsst-dm/great3-public | example_scripts/psf_models.py | 2 | 58929 | #!/usr/bin/env python
#
# Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Example module for PSF modeling in GREAT3
This Python module provides a simple PSF modeling code adapted to work with the GREAT3 data
format. It is intended to provide a "first-order" PSF module to allow GREAT3 participants to
compete in branches with variable PSFs without writing their own PSF estimation module, and to
provide an example for those who do wish to create their own PSF estimation module. It is not
expected to be competitive with the current state-of-the-art in PSF estimation, but it should not
be orders of magnitude worse, either.
DEPENDENCIES
Python 2.6.x-2.7.x
NumPy 1.6+
GalSim 1.0
PyFITS 1.0+
Matplotlib 0.10+ (optional; used for diagnostic plots)
ALGORITHM / USAGE
The algorithmic approach is a simple PCA of star images, interpolated using Chebyshev polynomials;
this is similar to the PSF modeling approach used in the SDSS Photo reduction pipeline and in
Jee & Tyson (2011). It should not be confused with the spatial PCA of Jarvis and Jain (2004).
Overall, the approach is:
1) Measure the sub-pixel centroids and fluxes of stars. This is done using the HSM adaptive
moments code in GalSim, which is overkill (as it measures shapes too, which we don't need)
but effective nonetheless. At the same time, we reorganize the from subfield-based files
to tile-based files. This produces 'star_meas' files. See measureStars().
2) Resample and rescale the images of input stars such that they have a common center and unit
flux, using the measurements from the previous step. In this step, we loop over each
tile-based star_meas file and then over each subfield-based starfield_image file, extracting
only the stars associated with the current tile. This results in more I/O but lower memory
consumption. The outputs of this are the `star_data` files, which are FITS binary tables
that contain the centered, normalized postage stamps as a 2-d array field. See
buildDataTables().
3) Compute an orthonormal basis for the star images using Principle Components Analysis
(see computePCA). The PCA is computed after subtracting the mean star image, and we
keep only the first few basis images, which represent most of the variance in the sample.
A separate basis is computed for each tile.
4) Project each star onto the PCA basis, and fit a 2-d Chebyshev polynomial to the resulting
coefficients as a function of position on the tile (LinearChebyshevModel.fit). Note that
there is a different Chebyshev polynomial for each basis image, so the model image at a
point is computed by evaluating the Chebyshev polynomials at that point, and using the
those values as the weights in a weighted sum of the basis images (which also includes the
mean star image, unweighted). A separate model is computed for each tile, but these can be
be gathered into a FieldModelSuite object to represent the entire field.
5) (optional) Evaluate the PSF model at the position of each galaxy. This can be done en-mass
using FieldModelSuite.makeImageGrid, which creates a grid of images for a subfield galaxy
catalog. It can also be done one galaxy at a time, using FieldModelSuite.evaluate().
Steps 3 and 4 can be done for a full field by calling FieldModelSuite.fit, and all 5 steps can
be performed by calling the main() function or executing this file as a script.
The LinearChebyshevModel class also has a few inspect* methods that can be used to visualize
the goodness of fit of the model given the per-tile star_data catalog it was constructed from.
Executing the script looks something like this:
./psf_models.py --model-file psf_models.p 40 /sims/variable_psf/ground/constant output
This will process subfields 40-59 (all of the subfields in a single field) for the branch located in
/sims/variable_psf/ground/constant, placing all outputs in the 'output' directory. The
FieldModelSuite object will be saved to output/psf_models.p for future use (in addition to creating
PSF image grids that correspond to the galaxy image grids). For more help with the command-line
interface, you can just run:
./psf_models.py --help
KNOWN ISSUES / LIMITATIONS
- The code has only been tested on 'variable_psf' branches, and may require some modification to
work on 'full' branches. Some of these may be modestly backwards-incompatible.
- Constructing the PSF models for an entire field while attempting to use all the stars can consume
a significant amount of memory (nearly 16 GB) and take ~3 hours on a single CPU. Using a smaller
image dimension for the PSF model images or using the max_stars parameter can be used to adress
this; just setting max_stars=1000 (the default) reduces the memory footprint to <2GB and the
single-CPU runtime to ~22 minutes. Once constructed, a PSF model suite is generally quite
lightweight unless a very large number of basis functions and/or high Chebyshev degree is used.
- In the GREAT3 multi-epoch space branches the individual images are not Nyquist sampled, which
invalidates the simple centroiding and interpolation algorithms use to construct the centered,
normalized star data tables here. For correct handling of this branch, these should be
replaced (after which code should be valid: the PCA and Chebyshev spatial fit make no
assumptions about the sampling, and indeed should be valid even if the star images are
upsampled relative to the original data).
- The PSFs in GREAT3 all have more than 4 degrees of freedom, which is the number of PCA
basis images we use by default here. At least some of these additional degrees of freedom
are likely important, but it is not clear whether using PCA for dimensionality reduction
is a valid way to obtain these additional basis functions, as the basis images typically get
much noisier after the first four.
- While polynomial spatial interpolation is a common choice today, the true spatial variation
of realistic PSFs (including those simulated in GREAT3) cannot be fit exactly using low-order
polynomials.
The classes in this file are designed to be modular; you should be able to replace the spatial
fitting component while retaining the shifting/normalization and PCA basis code, for instance,
or use basis images derived some other way with the Chebyshev spatial model.
NOTES
As is usual with NumPy, 2-d arrays representing images are ordered [y,x] (i.e. y=rows, x=cols).
NumPy conventions for 2-d polynomials (which we also adopt) reverse this, however, so an array of
2-d polynomial coefficients will be ordered [x,y].
Debug/status printing is controlled by the module-scope "verbose" variable. Set this to True to
get status reports in long-running routines (also controllable via the command-line interface).
"""
import os
import re
import optparse
import sys
import numpy
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import galsim
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from numpy.polynomial.chebyshev import chebval2d
except ImportError:
# chebval2d only available in 1.7+; we implement
# a more limited replacement (scalar x,y only)
# if that's not available
import numpy.polynomial.chebyshev
def chebval2d(x, y, c):
vy = numpy.zeros(c.shape[0], dtype=float)
for j in range(c.shape[0]):
vy[j] = numpy.polynomial.chebyshev.chebval(y, c[j,:])
return numpy.polynomial.chebyshev.chebval(x, vy)
nsubfields = 20 # There are 20 subfields, but this lets you easily change how many
# get processed by this program.
verbose = False
def log(msg):
if verbose:
print msg
obs_type_bounds = {"space": (0.0, 0.5, 0.0, 0.5),
"ground": (0.0, 2.0, 0.0, 2.0)}
obs_type_basis_size = {"space": 8, "ground": 4}
# This dict is used to provide the default keyword arguments to galsim.InterpolatedImage
# when resampling stars to center them. It can be modified here to avoid having to
# set keywords repeatedly when calling buildDataTables() or main().
default_interp_kwds = {"calculate_stepk": False, "calculate_maxk": False}
def mergeKeywordArgs(defaults, kwds):
if kwds is None:
return defaults.copy()
result = defaults.copy()
result.update(**kwds)
return result
def makeDataSchema(nx, ny):
"""A numpy.dtype object to be used for the structured array "data" argument to
LinearChebyshevModel.fit() and computePCA().
Arguments:
nx ---------- size of star images (and PSF model images) in the x direction
ny ---------- size of star images (and PSF model images) in the y direction
Both nx and ny must be odd (and are usually the same).
The fields of the returned dtype object are:
image ------- float64, shape (ny, nx): centered star image, normalized to unit flux
weight ------ float64, weight for the star relative to others in the data table
x,y --------- float64, position of the star, in the coordinates the spatial interpolation
will be done; usually tile_[x,y]_pos_deg
g1,g2 ------- float64, observed_shape.g1 and observed_shape.g2 adaptive moments ellipticity
estimates for the star as determined by HSM's FindAdaptiveMom(), taken from the
star_meas catalog
sigma ------- float64, the moments_sigma determined by HSM's FindAdaptiveMom(), taken from the
star_meas catalog, in pixels
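    For example (as buildDataTables() does; n_stars and the dimension 47 are placeholders):
        data = numpy.zeros(n_stars, dtype=makeDataSchema(47, 47))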
"""
if nx % 2 == 0 or ny % 2 == 0:
raise ValueError("Image dimensions (%d, %d) must be odd" % (nx, ny))
return numpy.dtype([("image", float, (ny, nx)),
("weight", float), ("x", float), ("y", float),
("g1", float), ("g2", float), ("sigma", float), ])
class LinearChebyshevModel(object):
"""A spatial interpolation class for PSF models: it assumes the model is a linear combination
of basis images at each point, with the coefficients of the basis images varying as Chebyshev
polynomials of the first kind.
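    Schematically, for a position (x, y) rescaled to the Chebyshev window as (xt, yt), the image
    returned by evaluate() is
        image(x, y) = image0 + sum_i basis[i] * chebval2d(xt, yt, coeff[i])
    optionally clipped at zero and renormalized to unit sum (see evaluate() below).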
"""
@staticmethod
def _get_transform_parameters(lower, upper):
offset = (lower + upper) / (lower - upper)
scale = 2.0 / (upper - lower)
return offset, scale
def __init__(self, basis, coeff, bounds, image0=None, normalize=True, clip=True):
"""Construct the spatial model.
Usually this constructor will not be called directly, as the fit() static method will
generally be used instead to compute the Chebyshev coefficients from a table of star data.
Arguments:
basis --------- a 3-d array containing the basis images, with shape
(basis_size, ny, nx), where basis_size is the number of basis images,
and (ny, nx) is the shape of each image.
coeff --------- a 3-d array of Chebyshev polynomial coefficients, with shape
(basis_size, cx, cy), where basis_size is the number of basis images,
and (cx, cy) is a matrix of Chebyshev coefficients.
bounds -------- tuple of (xmin, xmax, ymin, ymax) indicating the region the spatial
model is valid.
image0 -------- A static image to add to the model (useful for including the mean image
when basis functions represent deviations from the mean).
normalize ----- Sets the default for the normalize argument to __call__: whether to
rescale returned images so that they sum to exactly 1.
clip ---------- Sets the default for the clip argument to __call__: whether to reset
negative pixels to zero before returning model images (clipping is done
before normalization, if applicable).
"""
self.basis = basis
self.coeff = coeff
if self.basis.shape[0] != self.coeff.shape[0]:
raise ValueError(("First dimension of basis (%d) does not match first dimension of "
"coeff (%d)") % (self.basis.shape[0], self.coeff.shape[0]))
self.xmin = float(bounds[0])
self.xmax = float(bounds[1])
self.ymin = float(bounds[2])
self.ymax = float(bounds[3])
self.x_offset, self.x_scale = self._get_transform_parameters(self.xmin, self.xmax)
self.y_offset, self.y_scale = self._get_transform_parameters(self.ymin, self.ymax)
self.image0 = image0
if self.image0 is not None and self.image0.shape != self.basis.shape[1:]:
raise ValueError("Shape of image0 %s does not match shape of basis images %s"
% (self.image0.shape, self.basis.shape[1:]))
self.clip = bool(clip)
self.normalize = bool(normalize)
def __getinitargs__(self):
        return (self.basis, self.coeff, (self.xmin, self.xmax, self.ymin, self.ymax),
                self.image0, self.normalize, self.clip)
def evaluate(self, x, y, normalize=None, clip=None):
"""Evaluate the spatial model at the given point, returning a spatially-weighted linear
combination of the basis images.
Arguments:
x ------------- X position at which to evaluate the model
        y ------------- Y position at which to evaluate the model
normalize ----- Whether to rescale returned images so that they sum to exactly 1.
Passing None defers to the default set in __init__.
clip ---------- Whether to reset negative pixels to zero before returning model images
(clipping is done before normalization, if applicable). None defers to
the default set in __init__.
"""
if x < self.xmin or x > self.xmax:
raise ValueError("x coordinate %s out of range: should be between %s and %s"
% (x, self.xmin, self.xmax))
if y < self.ymin or y > self.ymax:
raise ValueError("y coordinate %s out of range: should be between %s and %s"
% (y, self.ymin, self.ymax))
xt = self.x_scale * x + self.x_offset
yt = self.y_scale * y + self.y_offset
result = numpy.zeros(self.basis.shape[1:], dtype=float)
for i in range(self.basis.shape[0]):
result += self.basis[i] * chebval2d(xt, yt, self.coeff[i])
if self.image0 is not None:
result += self.image0
        if clip or (clip is None and self.clip):
result[result < 0.0] = 0.0
        if normalize or (normalize is None and self.normalize):
result /= result.sum()
return result
def inspectStar(self, record):
"""Display model and residual images for a single star.
Given a record from a data table (i.e. the one passed to the fit method), that 'data'
image will be displayed, along with model and residual pairs for each of:
image0 ----- Constant component of the model; usually the mean obtained before computing
a mean-subtracted PCA basis.
proj ------- Direct projection of the basis onto the star image, with no concern for
the spatial interpolation (also includes the image0 term). This will
generally be the best fit to the data, as it does not include the spatial
smoothing from the Chebyshev polynomials.
interp ----- The full PSF model, obtained by evaluating the basis coefficients from the
Chebyshev spatial polynomial at the position of this star (also includes
the image0 term). This is what is returned by the evaluate() method.
Note that all images will be centered and normalized to unit flux, because
the data is constructed after shifting and rescaling the stars.
"""
        try:
            import matplotlib
            import matplotlib.cm
            import matplotlib.pyplot
        except ImportError:
            raise ImportError('Could not find matplotlib, which is needed to display PSF '
                              'model diagnostics!')
interp_image = self.evaluate(record['x'], record['y'])
interp_res = record['image'] - interp_image
if self.image0 is None:
n_rows = 3
zero_mean_data = record['image']
worst_res = interp_res
if self.image0 is not None:
n_rows = 4
image0_res = record['image'] - self.image0
zero_mean_data = image0_res
worst_res = image0_res
proj_coeff = self.basis.reshape(self.basis.shape[0],-1).dot(zero_mean_data.flatten())
proj_image = numpy.tensordot(proj_coeff, self.basis, axes=1)
proj_res = record['image'] - proj_image
if self.image0 is not None:
proj_image += self.image0
proj_res -= self.image0
kwds1 = dict(interpolation='nearest', origin='lower', cmap=matplotlib.cm.Blues,
vmin=record['image'].min()-1E-8, vmax=record['image'].max()+1E-8)
kwds2 = dict(interpolation='nearest', origin='lower', cmap=matplotlib.cm.coolwarm_r,
vmin=worst_res.min(), vmax=worst_res.max())
def addPlot(image, title, idx, **kw):
ax = fig.add_subplot(2, n_rows, idx)
mappable = ax.imshow(image, **kw)
ax.set_title(title)
ax.axis("off")
return ax, mappable
fig = matplotlib.pyplot.figure(figsize=(10,5))
addPlot(record['image'], 'data', 1, **kwds1)
index = 2
if self.image0 is not None:
addPlot(self.image0, 'image0', 2, **kwds1)
addPlot(image0_res, 'image0 res', 2 + n_rows, **kwds2)
index = 3
addPlot(proj_image, 'proj', index, **kwds1)
addPlot(proj_res, 'proj res', index + n_rows, **kwds2)
index += 1
ax1, mappable1 = addPlot(interp_image, 'interp', index, **kwds1)
ax2, mappable2 = addPlot(interp_res, 'interp res', index + n_rows, **kwds2)
fig.subplots_adjust(top=0.925, bottom=0.025, left=0.025, right=0.85,
wspace=0.025, hspace=0.15)
box1 = ax1.get_position()
box2 = ax2.get_position()
cax1 = fig.add_axes([box1.x1+0.025, box1.y0, 0.025, box1.height])
cax2 = fig.add_axes([box2.x1+0.025, box2.y0, 0.025, box2.height])
fig.colorbar(mappable1, cax=cax1)
fig.colorbar(mappable2, cax=cax2)
fig.canvas.draw()
return fig
def inspectSpatial(self, data, func=lambda r: (r**2).mean(), **kwds):
"""Create a colormapped scatter plot showing some function of the residuals between the
star data and the interpolated PSF model at the same points.
Arguments:
        data ------- a structured NumPy array data table with dtype as provided by
makeDataSchema(). Usually the same one passed to the fit() method when
constructing the model.
func ------- a function that takes a single residual image (NumPy array) and returns a
single value. The default computes the RMS.
**kwds ----- passed unmodified to matplotlib's 'scatter' routine.
"""
try:
from matplotlib import pyplot
        except ImportError:
raise ImportError('Could not find matplotlib, which is needed to display PSF '
'model diagnostics!')
v = numpy.zeros(data.size, dtype=float)
for i, record in enumerate(data):
r = self.evaluate(record['x'], record['y'])
r -= record['image']
r *= record['weight']
v[i] = func(r)
fig = pyplot.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(data['x'], data['y'], c=v, **kwds)
fig.canvas.draw()
return fig
    def inspectSigmaSpatial(self, data, **kwds):
        """Create a colormapped scatter plot showing the HSM-measured moments_sigma value of each
        star in the input data table, plotted at the star's position.
Arguments:
        data ------- a structured NumPy array data table with dtype as provided by
makeDataSchema(). Usually the same one passed to the fit() method when
constructing the model.
**kwds ----- passed unmodified to matplotlib's 'scatter' routine.
"""
try:
from matplotlib import pyplot
        except ImportError:
raise ImportError('Could not find matplotlib, which is needed to display PSF '
'model diagnostics!')
v = numpy.zeros(data.size, dtype=float)
fig = pyplot.figure()
ax = fig.add_subplot(1,1,1)
sc = ax.scatter(data['x'], data['y'], c=data['sigma'], **kwds)
pyplot.colorbar(sc)
fig.canvas.draw()
return fig
def inspectWhisker(self, data, label="", **kwds):
"""Create a whisker plot showing the HSM-measured observed_shape.g1 and observed_shape.g2
at the location of each star.
Arguments:
        data ------- a structured NumPy array data table with dtype as provided by
makeDataSchema(). Usually the same one passed to the fit() method when
constructing the model.
label ------ a string with which to label the plots, which will be given the title
label+" Data", label+" Model", and label+" (Data - Model) Residuals"
**kwds ----- passed unmodified to matplotlib's 'quiver' routine.
"""
try:
from matplotlib import pyplot
        except ImportError:
raise ImportError('Could not find matplotlib, which is needed to display PSF '
'model diagnostics!')
# Get the data shapes into polars
gmag = numpy.sqrt(data['g1']**2 + data['g2']**2)
gphi = .5 * numpy.arctan2(data['g2'], data['g1'])
fig = pyplot.figure(figsize=(10, 11))
ax = fig.add_subplot(2,2,1)
# Set the necessary default kwargs for making a nice whisker plot
kwds["headwidth"] = 0.
kwds["headlength"] = 0.
kwds["headaxislength"] = 0.
kwds["pivot"] = "mid"
# First do the Data plot
# Call the quiver routine, and plot a key
qq = pyplot.quiver(
data['x'], data['y'], gmag * numpy.cos(gphi), gmag * numpy.sin(gphi), **kwds)
xran = max(data['x']) - min(data['x']) # Get the x and y ranges
yran = max(data['y']) - min(data['y']) #
qqkey = pyplot.quiverkey( # Put this centrally above the whisker plot, a bit above whiskers
qq, numpy.mean(data['x']), max(data['y']) + 0.10 * yran, 0.05, "|g| = 0.05",
coordinates="data")
pyplot.xlabel('x [deg]')
pyplot.ylabel('y [deg]')
pyplot.title(label+' Data')
# Sort out the limits so that we don't have too-early quiver clipping by borders
pyplot.xlim(min(data['x']) - 0.05 * xran, max(data['x']) + 0.05 * xran)
pyplot.ylim(min(data['y']) - 0.05 * yran, max(data['y']) + 0.20 * yran)
# Then do the Model plot, requires calculation
model_shapes = numpy.zeros( # Set up storage table
len(data['x']), dtype=numpy.dtype([("g1", float), ("g2", float)]))
for model_record, data_record in zip(model_shapes, data):
hsm_results = (
galsim.ImageViewD(self.evaluate(data_record['x'], data_record['y']))
).FindAdaptiveMom()
model_record['g1'] = hsm_results.observed_shape.g1
model_record['g2'] = hsm_results.observed_shape.g2
gmag = numpy.sqrt(model_shapes['g1']**2 + model_shapes['g2']**2)
gphi = .5 * numpy.arctan2(model_shapes['g2'], model_shapes['g1'])
ax = fig.add_subplot(2,2,2)
# Call the quiver routine, and plot a key
qq = pyplot.quiver(
data['x'], data['y'], gmag * numpy.cos(gphi), gmag * numpy.sin(gphi),
**kwds)
qqkey = pyplot.quiverkey( # Put this centrally above the whisker plot, a bit above whiskers
qq, numpy.mean(data['x']), max(data['y']) + 0.10 * yran, 0.05, "|g| = 0.05",
coordinates="data")
pyplot.xlabel('x [deg]')
pyplot.ylabel('y [deg]')
pyplot.title(label+' Model')
# Sort out the limits so that we don't have too-early quiver clipping by borders
pyplot.xlim(min(data['x']) - 0.05 * xran, max(data['x']) + 0.05 * xran)
pyplot.ylim(min(data['y']) - 0.05 * yran, max(data['y']) + 0.20 * yran)
# Then do the residuals plot
gmag = numpy.sqrt(
(data['g1'] - model_shapes['g1'])**2 + (data['g2'] - model_shapes['g2'])**2)
gphi = .5 * numpy.arctan2(data['g2'] - model_shapes['g2'], data['g1'] - model_shapes['g1'])
ax = fig.add_subplot(2,2,3)
qq = pyplot.quiver(
data['x'], data['y'], gmag * numpy.cos(gphi), gmag * numpy.sin(gphi),
**kwds)
qqkey = pyplot.quiverkey( # Put this centrally above the whisker plot, a bit above whiskers
qq, numpy.mean(data['x']), max(data['y']) + 0.10 * yran, 0.01, "|g| = 0.01",
coordinates="data")
pyplot.xlabel('x [deg]')
pyplot.ylabel('y [deg]')
pyplot.title(label+' (Data - Model) Residuals')
# Sort out the limits so that we don't have too-early quiver clipping by borders
pyplot.xlim(min(data['x']) - 0.05 * xran, max(data['x']) + 0.05 * xran)
pyplot.ylim(min(data['y']) - 0.05 * yran, max(data['y']) + 0.20 * yran)
fig.canvas.draw()
return fig
@classmethod
    def fit(cls, basis, data, dim, degree, bounds, **kwds):
        """Fit an orthonormal image basis with Chebyshev spatial variation from a star data table.
Arguments:
basis --------- a 3-d array containing the basis images, with shape (basis_size, ny, nx),
where basis_size is the number of basis images, and (ny, nx) is the shape
of each image.
data ---------- a structured NumPy array with dtype of the type returned by
makeDataSchema: fields include 'image' (2-d float), 'weight', 'x', 'y'.
dim ----------- size of the PSF model images (on a side). Must be odd.
degree -------- maximum combined degree of the 2-d Chebyshev polynomial
(degree_x + degree_y <= degree)
bounds -------- tuple of (xmin, xmax, ymin, ymax) indicating the region the spatial model
is valid.
**kwds -------- passed unmodified to __init__
"""
if basis.shape[1:] != (dim, dim):
raise ValueError("Basis shape %s does not match provided dimension %s"
% (basis.shape[1:], dim))
# We construct the model object we'll return first, and fill in its coefficients later.
coeff = numpy.zeros((basis.shape[0], degree+1, degree+1), dtype=float)
model = cls(basis, coeff, bounds, **kwds)
# Start by projecting data images onto the basis (which we assume to be orthonormal).
# At the same time we'll map the data x,y coordinates to the (-1,1) window appropriate
# for Chebyshevs
proj_dtype = numpy.dtype([("coeff", float, basis.shape[0]), ("weight", float),
("xt", float), ("yt", float)])
proj = numpy.zeros(data.size, dtype=proj_dtype)
proj['xt'] = model.x_scale * data['x'] + model.x_offset
proj['yt'] = model.y_scale * data['y'] + model.y_offset
proj['weight'] = data['weight']
proj_matrix = basis.reshape(basis.shape[0], -1)
for n in range(data.size):
# The image shape should already be (dim, dim), but older versions of pyfits
# can screw it up on the round trip through writing and reading.
# So reshape it just to be sure.
zero_mean_image = data[n]['image'].reshape((dim,dim)).copy()
if model.image0 is not None:
zero_mean_image -= model.image0
proj[n]['coeff'][:] = proj_matrix.dot(zero_mean_image.flatten())
# Now we fit the projected basis coefficients with the spatial Chebyshev. We only
        # include terms in the lower triangle (i.e. degree_x+degree_y <= degree), and pack
# x and y into the second dimension of the cheby_m matrix. Note that we can reuse
# the same matrix for all basis functions, as these are completely independent
# (thanks to the orthonormality of the basis).
cheby_m = numpy.zeros((data.size, (degree+1)*(degree+2)/2), dtype=float)
cheby_x = numpy.polynomial.chebyshev.chebvander(proj['xt'], degree)
cheby_y = numpy.polynomial.chebyshev.chebvander(proj['yt'], degree)
im = 0
for ix in range(1+degree):
for iy in range(0, 1+degree-ix):
cheby_m[:,im] = cheby_x[:,ix] * cheby_y[:,iy]
im += 1
# Apply the weights to the matrix and the data vectors
cheby_m *= proj['weight'][:,numpy.newaxis]
proj['coeff'] *= proj['weight'][:,numpy.newaxis]
# Linear least squares (with a 2-d matrix on the rhs, columns are the basis elements)
fit_coeff = numpy.zeros((cheby_m.shape[1], basis.shape[0]), dtype=float)
for i in range(basis.shape[0]):
fit_coeff[:,i], chisq, _, _ = numpy.linalg.lstsq(cheby_m, proj['coeff'][:,i])
# Now we set the lower triangle of model.coeff attribute by unpacking the best-fit coeffs
im = 0
for ix in range(1+degree):
for iy in range(0, 1+degree-ix):
model.coeff[:,ix,iy] = fit_coeff[im,:]
im += 1
return model
def computePCA(data, dim, basis_size=4, weighted_mean=True, weighted_pca=True):
"""Create an image basis appropriate for use with LinearChebyshevModel using
    Principal Component Analysis (PCA).
Arguments:
data ------------ a structured NumPy array with dtype of the type returned by
makeDataSchema: fields include 'image' (2-d float), 'weight', 'x', 'y'.
dim ------------- size of the PSF model images (on a side). Must be odd.
basis_size ------ number of basis functions to keep
weighted_mean --- use the weights in the data array when computing the mean image
weighted_pca ---- use the weights in the data array when computing the PCA
Returns a tuple of (basis, image0), which can be used directly as inputs to
LinearChebyshevModel's constructor or fit method.
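    A minimal sketch of the usual pairing with the spatial fit (here 'data' is assumed to be a
    star_data table read back with pyfits.getdata, and dim=47 with ground-branch bounds are just
    example values):
        basis, image0 = computePCA(data, dim=47, basis_size=4)
        model = LinearChebyshevModel.fit(basis, data, 47, degree=4,
                                         bounds=(0.0, 2.0, 0.0, 2.0), image0=image0)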
"""
data_flat = data['image'].reshape(data.size, -1).copy()
weight_sum = data['weight'].sum()
if weighted_mean:
mean_flat = (data_flat * data['weight'][:,numpy.newaxis]).sum(axis=0) / weight_sum
else:
mean_flat = data_flat.mean(axis=0)
data_flat -= mean_flat
if weighted_pca:
data_flat *= (data['weight'] / weight_sum)[:,numpy.newaxis]
u, s, vt = numpy.linalg.svd(data_flat, full_matrices=False)
basis_flat = vt[:basis_size,:]
#image_shape = data['image'].shape[1:] # This doesn't always work!
image_shape = (dim, dim)
basis = basis_flat.reshape(basis_size, *image_shape)
image0 = mean_flat.reshape(*image_shape)
return basis, image0
def measureStars(field, sim_dir, work_dir):
"""Measure the sub-pixel centroids and fluxes of star images, and reorganize
the outputs by tile instead of subfield.
We use galsim.hsm to do the measurement (this does more than we need, as it measures
shapes as well as centroids and fluxes, but it's convenient).
Writes "star_meas-[x_tile_index]-[y_tile_index].fits" FITS binary tables to the work directory.
Arguments:
field -------- first subfield in the field to be processed, as an integer (should be a
multiple of 20)
sim_dir ------ simulation directory, containing the GREAT3 images and catalogs for a single
branch
work_dir ----- work directory; will contain the output star_meas catalogs on return
"""
log("Measuring star images and reorganizing into tiles")
schema = numpy.dtype([("subfield", int), ("x", float), ("y", float),
("tile_x", float), ("tile_y", float),
("dx", float), ("dy", float), ("flux", float),
("g1", float), ("g2", float), ("sigma", float), ])
tiles = {}
for subfield in range(field, field + nsubfields):
log(" Measuring subfield %d" % subfield)
sim_cat = pyfits.getdata(os.path.join(sim_dir, "star_catalog-%03d.fits" % subfield))
image = galsim.fits.read(os.path.join(sim_dir, "starfield_image-%03d-0.fits" % subfield))
meas_cat = numpy.zeros(sim_cat.size, dtype=schema)
# it's a little unfortunate it's this difficult to compute the bounding box of a star
# postage stamp given the record; apparently we don't record the size of the boxes
# directly anywhere in the challenge data itself
dx0 = 0 - int(sim_cat['x'].min()) # add this to x to get min x coordinate
dy0 = 0 - int(sim_cat['y'].min()) # add this to y to get min y coordinate
dx1 = 1 - dx0 # add this to x to get max x coordinate
dy1 = 1 - dy0 # add this to y to get max y coordinate
skipped = {}
for sim_record, meas_record in zip(sim_cat, meas_cat):
bounds = galsim.BoundsI(int(sim_record['x'] + dx0), int(sim_record['x'] + dx1),
int(sim_record['y'] + dy0), int(sim_record['y'] + dy1))
subimage = image[bounds]
meas_record['subfield'] = subfield
meas_record['x'] = sim_record['x']
meas_record['y'] = sim_record['y']
meas_record['tile_x'] = sim_record['tile_x_pos_deg']
meas_record['tile_y'] = sim_record['tile_y_pos_deg']
try:
# Use HSM algorithms in GalSim to compute centroid and flux
meas = subimage.FindAdaptiveMom(guess_x_centroid=float(sim_record['x']),
guess_y_centroid=float(sim_record['y']))
meas_record['dx'] = meas.moments_centroid.x - sim_record['x']
meas_record['dy'] = meas.moments_centroid.y - sim_record['y']
meas_record['flux'] = meas.moments_amp
meas_record['g1'] = meas.observed_shape.g1
meas_record['g2'] = meas.observed_shape.g2
meas_record['sigma'] = meas.moments_sigma
# Add meas_record to a tile-indexed dict if successful
tile_index = (int(sim_record['x_tile_index']), int(sim_record['y_tile_index']))
tiles.setdefault(tile_index, []).append(meas_record)
except RuntimeError as err:
# sometimes HSM fails; maybe other things too?
msg = str(err)
skipped[msg] = skipped.get(msg, 0) + 1
if skipped:
log(" skipped %d of %d records due to the following errors:" %
(sum(skipped.itervalues()), sim_cat.size))
for msg, count in skipped.iteritems():
log(" %d records: %s" % (count, msg.strip()))
log(" Reorganizing into tiles")
if not os.path.isdir(work_dir):
os.makedirs(work_dir)
for tile_index, records in sorted(tiles.items()):
log(" Sorting and writing catalog for tile %d,%d" % tile_index)
records.sort(key=lambda r: r['flux'], reverse=True)
data_array = numpy.array(records, dtype=schema)
out_file = os.path.join(work_dir, "star_meas-%03d-%02d-%02d.fits" % ((field,) + tile_index))
pyfits.writeto(out_file, data_array, clobber=True)
def buildDataTables(field, sim_dir, work_dir, dim, max_stars=1000, interp_kwds=None):
"""Loop over all subfields in a field, building a tile-indexed dictionary of star data tables
appropriate for use with computePCA and LinearChebyshevModel.fit. The main work done by this
routine is the shifting and rescaling necessary to center the star images and normalize their
fluxes to unity.
Writes "star_data-[x_tile_index]-[y_tile_index].fits" FITS binary tables to the work directory.
Requires measureStars() to have been run on the same directories previously.
Arguments:
field -------- first subfield in the field to be processed, as an integer (should be a
multiple of 20)
sim_dir ------ simulation directory, containing the GREAT3 images and catalogs for a single
branch
work_dir ----- work directory; contains the star_meas catalogs used as inputs and will
contain the output star_data catalogs on return
dim ---------- size of the PSF model images (on a side). Must be odd.
max_stars ---- maximum number of stars to use per tile (None = no limit); the highest SNR
stars will be kept without regard for their spatial distribution.
interp_kwds -- keyword arguments passed unmodified to galsim.InterpolatedImage when shifting
the images.
"""
log("Shifting star images and building data tables")
interp_kwds = mergeKeywordArgs(default_interp_kwds, interp_kwds)
radius = dim // 2
assert radius * 2 + 1 == dim
schema = makeDataSchema(dim, dim)
    regex = re.compile(r"star_meas-%03d-(\d\d)-(\d\d)\.fits" % field)
tile_index_list = []
meas_cat_list = []
data_cat_list = []
for meas_file in sorted(os.listdir(work_dir)):
m = regex.match(meas_file)
if not m: continue
tile_index = (int(m.group(1)), int(m.group(2)))
meas_cat = pyfits.getdata(os.path.join(work_dir, meas_file))
# truncate the catalog if desired
if max_stars is not None:
stop = min(max_stars, meas_cat.size)
meas_cat = meas_cat[:stop].copy()
        # Allocate the output array; a persistent iterator over it (created once per tile,
        # just before the subfield loop below) lets successive subfields fill successive
        # entries. Note that data_cat[i] will not generally correspond to meas_cat[i].
data_cat = numpy.zeros(meas_cat.size, dtype=schema)
tile_index_list.append(tile_index)
meas_cat_list.append(meas_cat)
data_cat_list.append(data_cat)
    n_tiles = len(tile_index_list)
    # One persistent iterator per tile, shared across subfields, so that each subfield's
    # stars fill successive entries of that tile's data_cat instead of overwriting them.
    data_cat_iter_list = [iter(data_cat) for data_cat in data_cat_list]
# The subfield images are pretty huge (especially for space) so we want to make sure
    # we only need to read them once each. So make the subfield loop the outer loop.
for subfield in range(field, field + nsubfields):
log(" Shifting images for subfield %d" % subfield)
image = galsim.fits.read(
os.path.join(sim_dir, "starfield_image-%03d-0.fits" % subfield))
pix_scale_deg = (image.scale * galsim.arcsec) / galsim.degrees
for i_tile in range(n_tiles):
tile_index = tile_index_list[i_tile]
meas_cat = meas_cat_list[i_tile]
            data_cat_iter = data_cat_iter_list[i_tile]
sub_meas_cat = meas_cat[meas_cat['subfield'] == subfield]
if sub_meas_cat.size == 0:
log(" skipping tile %d,%d; no records" % tile_index)
continue
for meas_record, data_record in zip(sub_meas_cat, data_cat_iter):
bounds = galsim.BoundsI(int(numpy.floor(meas_record['x'])) - radius,
int(numpy.ceil(meas_record['x'])) + radius,
int(numpy.floor(meas_record['y'])) - radius,
int(numpy.ceil(meas_record['y'])) + radius)
subimage = image[bounds]
# Use InterpolatedImage to shift so we're centered on the center pixel
# and have unit flux.
interp = galsim.InterpolatedImage(subimage, flux=meas_record['flux'], **interp_kwds)
interp.applyShift(dx=-meas_record['dx']*image.scale,
dy=-meas_record['dy']*image.scale)
interp.setFlux(1.0)
out_image = galsim.ImageView[numpy.float64](data_record['image'],
xmin=-radius, ymin=-radius,
scale=image.scale)
interp.draw(image=out_image)
# Compute the true position of the star in degrees from the origin of the tile.
# The measured sub-pixel offset almost certainly doesn't matter, but we'll include
# it for completeness.
data_record['x'] = meas_record['tile_x'] + meas_record['dx'] * pix_scale_deg
data_record['y'] = meas_record['tile_y'] + meas_record['dy'] * pix_scale_deg
# Copy across the HSM moments estimates for the star stored in meas_data too
data_record['g1'] = meas_record['g1']
data_record['g2'] = meas_record['g2']
data_record['sigma'] = meas_record['sigma']
# within a field, we have the same noise level, so SNR ~ flux
data_record['weight'] = meas_record['flux']
for i_tile in range(n_tiles):
tile_index = tile_index_list[i_tile]
data_cat = data_cat_list[i_tile]
data_cat['weight'] /= data_cat['weight'].sum()
filename = os.path.join(work_dir, "star_data-%03d-%02d-%02d.fits" % ((field,) + tile_index))
pyfits.writeto(filename, data_cat, clobber=True)
class FieldModelSuite(object):
"""Represents a suite of PSF models (e.g. LinearChebyshevModel instances) that represent
the PSF of a GREAT3 field, split up into tiles.
It provides dict-like access to the models themselves. For instance:
s = FieldModelSuite(...)
s[1,2].inspectStar(...) # inspect a star's fit in tile (1,2)
It also provides an evaluate() method that looks up the appropriate model and calls its
evaluate() method:
        s.evaluate(x_tile_index=1, y_tile_index=2, tile_x_pos_deg=0.321, tile_y_pos_deg=0.456)
Most often, FieldModelSuite objects will be constructed via the fit() static method
    (or the main() function, which delegates to that), and then either makeImageGrid() is
called or the suite object is saved for later use.
"""
def __init__(self, models, dim):
self._models = models
self.dim = dim
def __getitem__(self, k):
x_tile, y_tile = k
return self._models[int(x_tile), int(y_tile)]
def __setitem__(self, k, v):
x_tile, y_tile = k
self._models[int(x_tile), int(y_tile)] = v
def save(self, filename):
"""Save the model suite to disk
This is a workaround for the fact that you can't pickle an object from a script run via
__main__, as __main__ doesn't know what module it's in. Instead we pickle the __init__
arguments of all of the constituent models, and call them directly when loading.
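        A sketch of the intended round trip (the file name is just an example):
            suite.save("psf_models.p")
            restored = FieldModelSuite.load("psf_models.p")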
"""
m = {}
for k, v in self._models.iteritems():
m[k] = v.__getinitargs__()
        with open(filename, 'wb') as f:
pickle.dump((m, self.dim), f, protocol=2)
@staticmethod
def load(filename):
"""Load a model suite from disk
See save() for an explanation of why we can't just rely on pickle
"""
        with open(filename, 'rb') as f:
m, dim = pickle.load(f)
models = {}
for k, v in m.iteritems():
models[k] = LinearChebyshevModel(*v)
return FieldModelSuite(models, dim)
def evaluate(self, x_tile_index, y_tile_index, tile_x_pos_deg, tile_y_pos_deg, **kwds):
"""Return the PSF model at the given tile index and position
Arguments match the GREAT3 galaxy catalog columns with the same names; **kwds are
passed unmodified to the PSF model class' evaluate method (e.g.
LinearChebyshevModel.evaluate).
"""
return self[x_tile_index, y_tile_index].evaluate(tile_x_pos_deg, tile_y_pos_deg, **kwds)
def makeImageGrid(self, galaxy_cat):
"""Create a grid of PSF images that corresponds to the PSF model evaluated at the
positions of galaxies.
The 100x100 grid of PSF images will directly correspond to the grid of galaxy images
the given galaxy catalog refers to, but the size of the full PSF image will be different,
as GREAT3 galaxy images are typically 48x48 (in most branches) but PSF models must have
odd dimensions.
Arguments:
galaxy_cat ---- GREAT3 galaxy catalog to read positions from. Must be ordered
by row, then column (as GREAT3 catalogs are, by default).
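        For example, with dim=47 (the default used by main()) the returned array is 4700x4700
        (float32), and the PSF for the galaxy in grid row i, column j occupies
        full_image[i*dim:(i+1)*dim, j*dim:(j+1)*dim].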
"""
full_image = numpy.zeros((100*self.dim, 100*self.dim), dtype=numpy.float32)
n = 0
for i in range(100):
for j in range(100):
record = galaxy_cat[n]
image = self.evaluate(record['x_tile_index'], record['y_tile_index'],
record['tile_x_pos_deg'], record['tile_y_pos_deg'])
full_image[i*self.dim:(i+1)*self.dim,j*self.dim:(j+1)*self.dim] = image
n += 1
return full_image
@staticmethod
def fit(field, work_dir, dim, bounds, degree=4, basis_size=4, pca_kwds=None, spatial_kwds=None):
"""
Create a new FieldModelSuite using data tables created by buildDataTables.
Arguments:
field -------- first subfield in the field to be processed, as an integer (should be a
multiple of 20)
work_dir ----- work directory, containing the star_data catalogs used as inputs
dim ---------- size of the PSF model images (on a side). Must be odd.
degree ------- degree of the Chebyshev polynomial used for interpolation between stars
basis_size --- number of basis functions to keep
bounds ------- tuple of (xmin, xmax, ymin, ymax) that sets the bounding box for each
Model object.
pca_kwds ----- keyword arguments passed unmodified to computePCA
spatial_kwds - keyword arguments passed unmodified to LinearChebyshevModel.fit
"""
log("Fitting models")
if pca_kwds is None: pca_kwds = {}
if spatial_kwds is None: spatial_kwds = {}
models = {}
        regex = re.compile(r"star_data-%03d-(\d\d)-(\d\d)\.fits" % field)
for data_file in sorted(os.listdir(work_dir)):
m = regex.match(data_file)
if not m: continue
tile_index = (int(m.group(1)), int(m.group(2)))
data = pyfits.getdata(os.path.join(work_dir, data_file))
# The shape doesn't always survive the roundtrip through pyfits, so reshape it
# and use that to check against the provided dimension. (If the read in shape
# is incommensurate with dim,dim, then the reshape will fail.)
image = data['image'].reshape(data['image'].shape[0], dim, dim)
assert dim == image.shape[1]
assert dim == image.shape[2]
log(" Fitting model for tile %d,%d" % tile_index)
basis, image0 = computePCA(data, dim, basis_size, **pca_kwds)
models[tile_index] = LinearChebyshevModel.fit(
basis, data, dim, degree, bounds, image0=image0, **spatial_kwds)
return FieldModelSuite(models, dim)
def main(field, sim_dir, work_dir, obs_type, dim=47, max_stars=1000, degree=4, basis_size=None,
bounds=None, use_old_meas=False, use_old_data=False, model_file=None,
use_saved_models=False, make_psf_images=True, interp_kwds=None, pca_kwds=None,
spatial_kwds=None):
"""Main driver for all routines in this file, and the implementation of most of
the command-line interface.
This routine has four distinct steps, each with a (possibly optional) intermediate output:
1) Measure the star images and reorganize into tiles, using measureStars(), writing star_meas
catalogs to the work directory. Skipped if any of the use_* arguments are True.
2) Resample and rescale the star images using buildDataTables(), writing star_data catalogs to
the work directory. Skipped if use_old_data or use_saved_models is True.
3) Compute a PCA basis and fit Chebyshev spatial polynomials for each tile, building a
FieldModelSuite object using FieldModelSuite.fit(). Saves this object if (and only if)
model_file is set to the filename to write to. Skipped (and the FieldModelSuite loaded
from model_file) if use_saved_models is True.
4) Create PSF model images that correspond to the positions of all galaxies in the field,
using FieldModelSuite.makeImageGrid(), writing psf_models FITS images to the work
directory. Skipped if make_psf_images is False.
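    A sketch of an equivalent programmatic call (the paths mirror the command-line example in the
    module docstring and are placeholders):
        main(40, "/sims/variable_psf/ground/constant", "output", "ground",
             model_file="psf_models.p")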
Arguments:
field -------- first subfield in the field to be processed, as an integer (should be a
multiple of 20)
sim_dir ------ simulation directory, containing the GREAT3 images and catalogs for a single
branch
work_dir ----- work directory; contains the star_meas catalogs used as inputs and will
contain the output star_data catalogs on return
obs_type ----- one of 'ground' or 'space'; used to set the bounding boxes for the models
(space branches have 0.5 degree tiles, ground has 2.0 degree tiles) and
the default value for basis_size.
dim ---------- size of the PSF model images (on a side). Must be odd.
max_stars ---- maximum number of stars to use per tile (None = no limit); the highest SNR
stars will be kept without regard for their spatial distribution.
degree ------- degree of the Chebyshev polynomial used for interpolation between stars
basis_size --- number of basis functions to keep
use_old_meas - whether to skip step 1 above, and use existing star_meas catalogs.
use_old_data - whether to skip steps 1 and 2 above, and use existing star_data catalogs.
model_file --- name of the file to save the FieldModelSuite to, or (if use_saved_models)
to load it from
use_saved_models -- whether skip steps 1-3 above, and instead load a FieldModelSuite by
unpickling model_file.
make_psf_images --- whether to perform step (4) above
interp_kwds -- keyword arguments passed unmodified to galsim.InterpolatedImage when shifting
the star images
pca_kwds ----- keyword arguments passed unmodified to computePCA
spatial_kwds - keyword arguments passed unmodified to LinearChebyshevModel.fit
"""
    if bounds is None:
        bounds = obs_type_bounds[obs_type]
if basis_size is None:
basis_size = obs_type_basis_size[obs_type]
if use_saved_models:
suite = FieldModelSuite.load(os.path.join(work_dir, model_file))
else:
if not use_old_data:
if not use_old_meas:
measureStars(field, sim_dir, work_dir)
buildDataTables(field, sim_dir, work_dir, dim=dim, max_stars=max_stars,
interp_kwds=interp_kwds)
suite = FieldModelSuite.fit(
field, work_dir, dim,
bounds=bounds, degree=degree, basis_size=basis_size, pca_kwds=pca_kwds,
spatial_kwds=spatial_kwds)
if model_file is not None:
suite.save(os.path.join(work_dir, model_file))
if make_psf_images:
log("Creating PSF model images")
for subfield in range(field, field + nsubfields):
log(" creating model images for subfield %d" % subfield)
galaxy_cat_file = os.path.join(sim_dir, "galaxy_catalog-%03d.fits" % subfield)
galaxy_cat = pyfits.getdata(galaxy_cat_file)
out_file = os.path.join(work_dir, "psf_models-%03d.fits" % subfield)
image_grid = suite.makeImageGrid(galaxy_cat)
pyfits.writeto(out_file, image_grid, clobber=True)
if __name__ == "__main__":
usage = "usage: %prog [options] FIELD SIM_DIR WORK_DIR OBS_TYPE"
description = """Create PSF models for the given field, and use
them to create images containing grids of PSF images for the galaxies
in that field. FIELD is the number of the first subfield in the field
to process (a multiple of 20); all subfields in that field will be
processed together. SIM_DIR is the directory containing GREAT3 images
and catalogs for the branch of interest. WORK_DIR is the directory
where output files should be placed. It will be created if it does
not exist. There will be one file for each subfield, with PSF images
on a grid that corresponds to the same grid for galaxies (though the
grid spacing will be different, as defined by the --dim argument).
OBS_TYPE is one of "ground" or "space", and is used to set the
expected tile size and the default value for --basis-size.
By default, all steps of reorganizing the data and building the PSF
models will be carried out, with intermediate outputs written to disk
as well as the final PSF model images that correspond to the galaxy
images. The --use* options can be used to restart the job from these
intermediate outputs instead of having to repeat them.
"""
parser = optparse.OptionParser(usage=usage, description=description)
parser.add_option("--dim", dest="dim", type=int, default=41, metavar="N",
help="Width and height of PSF model images")
parser.add_option("--basis-size", dest="basis_size", type=int, default=None, metavar="N",
help="Number of PCA basis images to use (default is 4 for ground and 8 "+
"for space)")
parser.add_option("--degree", dest="degree", type=int, default=4, metavar="N",
help="Maximum order of the 2-d Chebyshev polynomial spatial functions")
parser.add_option("--max-stars", dest="max_stars", type=int, default=1000, metavar="N",
help="Maximum number of stars to include per-tile (clip by SNR)")
parser.add_option("--no-max-stars", dest="max_stars", action='store_const', const=None,
help="Make the number of stars to use per tile unlimited")
parser.add_option("--model-file", dest="model_file", type=str, default="psf_model.p",
metavar="FILE", help="Save (or load) the model suite object with this "+
"filename in the WORK_DIR directory [default='psf_model.p']")
parser.add_option("--no-psf-images", dest="make_psf_images", action="store_false", default=True,
help="Don't create PSF model images (should be used with --model-file)")
parser.add_option("--use-old-meas", dest="use_old_meas", action="store_true", default=False,
help="Reuse star_meas files from a previous run")
parser.add_option("--use-old-data", dest="use_old_data", action="store_true", default=False,
help="Reuse star_data files from a previous run")
parser.add_option("--use-saved-models", dest="use_saved_models", action="store_true",
default=False, metavar="FILE",
help="Load models from disk; --model-file must be set")
parser.add_option("--quiet", dest="quiet", action='store_true', default=False,
help="Don't print progress statements")
opts, args = parser.parse_args()
try:
field, sim_dir, work_dir, obs_type = args
except ValueError:
parser.error("exactly four positional arguments are required")
if not os.path.isdir(sim_dir):
parser.error("input directory %s does not exist or is not a directory" % sim_dir)
try:
field = int(field)
    except (TypeError, ValueError):
parser.error("field argument '%s' is not an integer" % field)
obs_type = obs_type.strip().lower()
if obs_type not in ("space", "ground"):
parser.error("obs_type '%s' must be one of 'space' or 'ground'" % obs_type)
if opts.use_saved_models and not opts.model_file:
        parser.error("--use-saved-models requires --model-file")
if not opts.make_psf_images and not opts.model_file:
sys.stderr.write("WARNING: not making PSF images or saving the PSF model\n")
if not os.path.isdir(work_dir):
os.makedirs(work_dir)
if opts.quiet:
verbose = False
else:
verbose = True
main(
field, sim_dir, work_dir, obs_type,
dim=opts.dim, basis_size=opts.basis_size,
degree=opts.degree, use_old_meas=opts.use_old_meas, use_old_data=opts.use_old_data,
use_saved_models=opts.use_saved_models, max_stars=opts.max_stars,
model_file=opts.model_file, make_psf_images=opts.make_psf_images)
| bsd-3-clause |
pompiduskus/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
EntilZha/leaderboard | scoring/appthis.py | 1 | 2169 | import csv
import os
import pickle
from io import StringIO
from sklearn.metrics import roc_auc_score as AUC
from scoring.abstract import AbstractScoring
cur_dir_path = os.path.dirname(os.path.realpath(__file__))
class AppthisAUCScoring(AbstractScoring):
@property
def higher_better(self):
return True
def validate(self, submission_text: str):
tmp_file = StringIO(submission_text)
reader = csv.DictReader(tmp_file)
if set(reader.fieldnames) != set(['event_id', 'conversion_probability']):
tmp_file.close()
return False, (
"The headers of your CSV file are {}. They must be 'event_id' and "
"'conversion_probability'.".format(reader.fieldnames)
)
return True, None
def score(self, submission_text: str):
        # Seed each list with one positive and one negative dummy entry so that
        # roc_auc_score always sees both classes, even for degenerate splits.
        public_preds, private_preds, public_actuals, private_actuals = [1, 0], [1, 0], [1, 0], [1, 0]
tmp_file = StringIO(submission_text)
csv_reader = csv.DictReader(tmp_file)
public_event_ids_pkl_name = '{}/public_validation_event_ids.pkl'.format(cur_dir_path)
with open(public_event_ids_pkl_name, 'rb') as public_validation_event_ids_file:
public_validation_event_ids = pickle.load(public_validation_event_ids_file)
for row in csv_reader:
if row['event_id'] in public_validation_event_ids:
public_preds.append(float(row['conversion_probability']))
else:
private_preds.append(float(row['conversion_probability']))
with open('{}/all_validation_labels.txt'.format(cur_dir_path), 'r') as all_validation_labels_file:
for line in all_validation_labels_file:
event_id, event_label = line.rstrip().split(' ')
if event_id in public_validation_event_ids:
public_actuals.append(float(event_label))
else:
private_actuals.append(float(event_label))
public_score = AUC(public_actuals, public_preds)
private_score = AUC(private_actuals, private_preds)
return public_score, private_score, None
| apache-2.0 |
pkruskal/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
rosswhitfield/javelin | docs/conf.py | 1 | 1331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinx.ext.imgmath',
'matplotlib.sphinxext.plot_directive'
]
source_suffix = '.rst'
master_doc = 'index'
project = 'Javelin'
copyright = '2017, Ross Whitfield'
author = 'Ross Whitfield'
version = '0.1.0'
release = '0.1.0'
exclude_patterns = ['_build']
pygments_style = 'friendly'
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Javelindoc'
latex_documents = [
(master_doc, 'Javelin.tex', 'Javelin Documentation',
'Ross Whitfield', 'manual'),
]
intersphinx_mapping = {'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'xarray': ('https://xarray.pydata.org/en/stable/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'ase': ('https://wiki.fysik.dtu.dk/ase/', None),
'diffpy.Structure': ('https://www.diffpy.org/diffpy.structure/', None)}
autodoc_default_flags = ['members', 'undoc-members']
# Use legacy numpy printing. This fix is made to keep doctests functional.
try:
np.set_printoptions(legacy='1.13')
except TypeError:
pass
| mit |
rrohan/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
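A schematic sketch of that sampling scheme (illustrative only; this is not the code used by
``make_multilabel_classification``, and the uniform ``theta`` and variable names are assumptions)::

    rng = np.random.RandomState(0)
    theta = np.ones(n_classes) / n_classes
    n = 0
    while n <= 2:                   # rejection sampling on the number of labels
        n = rng.poisson(n_labels)
    labels = set()
    while len(labels) < n:          # reject classes that were already chosen
        labels.add(rng.choice(n_classes, p=theta))
    k = 0
    while k == 0:                   # the document length is never zero
        k = rng.poisson(length)
    words = [rng.choice(n_words, p=theta_c) for _ in range(k)]  # theta_c: word dist. of class c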
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
HazenBabcock/opensdraw | opensdraw/library/knots.py | 1 | 9183 | #!/usr/bin/env python
"""
.. module:: knots
:synopsis: Python functions to create knots.
.. moduleauthor:: Hazen Babcock
"""
import math
import numbers
import numpy
import os
import opensdraw.lcad_language.curve as curve
import opensdraw.lcad_language.curveFunctions as curveFunctions
import opensdraw.lcad_language.geometry as geometry
import opensdraw.lcad_language.interpreter as interpreter
lcad_functions = {}
class SheetBendKnot(interpreter.LCadFunction):
"""
**sheet-bend-knot** - Creates a sheet bend knot function.
This creates and returns a function that parametrizes a sheet bend knot. All
units are LDU.
When you call the created knot function you will get a 4 x 4 transform
matrix which will translate to the requested position on the knot and
orient to a coordinate system where the z-axis is pointing along the
knot, the x-axis is in the plane of the knot and the y-axis is
perpendicular to the plane of the knot.
:param diameter: The diameter of the string.
:param loop_size: The diameter of the loop.
Usage::
(def sbk (sheet-bend-knot 3 10)) ; A knot with 3 LDU diameter string and loop diameter of 10.
     (def p1 (sbk 1))              ; p1 is a 4 x 4 transform matrix for the point
                                   ; at distance 1 along the knot.
(sbk t) ; Returns the length of the knot.
"""
def __init__(self):
interpreter.LCadFunction.__init__(self, "sheet-bend-knot")
self.setSignature([[numbers.Number], [numbers.Number]])
def call(self, model, diameter, loop_size):
sbknot = SBKnot(diameter, loop_size)
return curveFunctions.CurveFunction(sbknot, "user created sheet bend knot function.")
lcad_functions["sheet-bend-knot"] = SheetBendKnot()
#
# The sheet bend knot is created using two custom curves and a large loop.
#
class SBKnot(object):
def __init__(self, diameter, loop_size):
self.loop_size = loop_size
self.scale = diameter
# Curve 1.
cpts1 = []
cpts1.append(curve.ControlPoint(0, 0, 0, 0, 0, 1, 1, 0, 0))
cpts1.append(curve.ControlPoint(2, 0, 6, 1, 0, 1))
c1_fname = os.path.join(os.path.dirname(__file__), "sbk_curve1.txt")
self.curve1 = KnotCurve(c1_fname, cpts1)
self.curve1_stop = self.curve1.getLength() * self.scale
self.seg1_stop = self.curve1_stop + 0.5 * self.loop_size - self.scale * math.sqrt(2*2 + 2*2)
self.seg1_x_start = 2 * self.scale
self.seg1_z_start = 6 * self.scale
self.seg1_dx = 1.0/math.sqrt(2)
self.seg1_dz = 1.0/math.sqrt(2)
# This (sort of) handles small loops.
if (self.seg1_stop < self.curve1_stop):
self.curve1_stop = self.seg1_stop
# Loop.
self.loop_stop = self.seg1_stop + 0.75 * math.pi * self.loop_size
self.loop_cx = 0
self.loop_cz = math.sqrt(2) * 0.5 * self.loop_size + self.scale * 4
# Straight segment 2.
self.seg2_stop = self.loop_stop + 0.5 * self.loop_size - self.scale * math.sqrt(2*2 + 2*2)
self.seg2_x_start = self.loop_cx - 0.5 * self.loop_size / math.sqrt(2)
self.seg2_z_start = self.loop_cz - 0.5 * self.loop_size / math.sqrt(2)
self.seg2_dx = 1.0/math.sqrt(2)
self.seg2_dz = -1.0/math.sqrt(2)
#Curve 2.
cpts2 = []
cpts2.append(curve.ControlPoint(-2, 0, 6, 1, 0, -1, -1, 0, 0))
cpts2.append(curve.ControlPoint(-1, 0, 5, 1, 0, -1))
cpts2.append(curve.ControlPoint(0.5, -0.8, 3.5, 1, 0, -1))
cpts2.append(curve.ControlPoint(1, 0, 2.5, 0, 1, 0))
cpts2.append(curve.ControlPoint(0, 0.8, 2, -1, 0, 0))
cpts2.append(curve.ControlPoint(-0.8, 0, 2.2, 0, -1, 0))
cpts2.append(curve.ControlPoint(-0.8, -1.2, 3.5, 0.2, 0, 1))
cpts2.append(curve.ControlPoint(0.2, 0, 5.1, 0, 1, 0))
cpts2.append(curve.ControlPoint(1.2, 0.8, 4.1, 1, -0.2, -1.2))
cpts2.append(curve.ControlPoint(2, 0, 2.5, 0, -1, -1))
cpts2.append(curve.ControlPoint(0, -0.9, 2.7, -1, 0.3, 1))
cpts2.append(curve.ControlPoint(-1.5, 0, 4.2, -1, 0, 1))
cpts2.append(curve.ControlPoint(-5, 0, 5.5, -1, 0, 0))
c2_fname = os.path.join(os.path.dirname(__file__), "sbk_curve2.txt")
self.curve2 = KnotCurve(c2_fname, cpts2)
self.curve2_stop = self.seg2_stop + self.curve2.getLength() * self.scale
self.length = self.curve2_stop
def getLength(self):
return self.length
def getMatrix(self, dist):
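        # dist is a cumulative arc length along the whole knot (in LDU).  Walk
        # the pieces in order and hand off to whichever one contains dist.  The
        # two KnotCurves work in unscaled units, so distances passed to them and
        # the translations they return are scaled by self.scale (the diameter).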
if (dist < self.curve1_stop):
m = self.curve1.getMatrix(dist / self.scale)
m[:3,3] = m[:3,3] * self.scale
return m
if (dist < self.seg1_stop):
dist -= self.curve1_stop
x = self.seg1_x_start + self.seg1_dx * dist
z = self.seg1_z_start + self.seg1_dz * dist
y_vec = [0, 1, 0]
z_vec = [self.seg1_dx, 0, self.seg1_dz]
x_vec = numpy.cross(y_vec, z_vec)
return geometry.vectorsToMatrix([x, 0, z], x_vec, y_vec, z_vec)
if (dist < self.loop_stop):
dist -= self.seg1_stop
angle = 0.75 * math.pi - 2.0 * dist/self.loop_size
x = self.loop_cx + 0.5 * self.loop_size * math.sin(angle)
z = self.loop_cz + 0.5 * self.loop_size * math.cos(angle)
dx = math.cos(angle)
dz = -math.sin(angle)
y_vec = [0, 1, 0]
z_vec = [-dx, 0, -dz]
x_vec = numpy.cross(y_vec, z_vec)
return geometry.vectorsToMatrix([x, 0, z], x_vec, y_vec, z_vec)
if (dist < self.seg2_stop):
dist -= self.loop_stop
x = self.seg2_x_start + self.seg2_dx * dist
z = self.seg2_z_start + self.seg2_dz * dist
y_vec = [0, 1, 0]
z_vec = [self.seg2_dx, 0, self.seg2_dz]
x_vec = numpy.cross(y_vec, z_vec)
return geometry.vectorsToMatrix([x, 0, z], x_vec, y_vec, z_vec)
dist -= self.seg2_stop
m = self.curve2.getMatrix(dist / self.scale)
m[:3,3] = m[:3,3] * self.scale
return m
def saveControlPoint(fp, cp):
fp.write(" ".join(map(str, cp.location.tolist())))
fp.write(" ")
fp.write(" ".join(map(str, cp.raw_z_vec.tolist())))
if cp.x_vec is not None:
fp.write(" ")
fp.write(" ".join(map(str, cp.x_vec.tolist())))
fp.write("\n")
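# Each cached line written above holds the control point location (x y z), its
# raw z vector (zx zy zz) and, when present, its x vector (xx xy xz), all as
# space-separated floats; KnotControlPoint below simply parses such a line.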
class KnotControlPoint(curve.ControlPoint):
"""
    Load a control point from one line of the cache file written by
    saveControlPoint(), as an optimization.
"""
def __init__(self, data):
vals = map(float, data.split(" "))
curve.ControlPoint.__init__(self, *vals)
class KnotCurve(curve.Curve):
def __init__(self, filename, control_points):
# Load control points from file.
if os.path.exists(filename):
curve.Curve.__init__(self, False, True, 1.0, 0.0)
control_points = []
with open(filename) as fp:
for line in fp:
control_points.append(KnotControlPoint(line))
i = 0
while (i < len(control_points)):
self.addSegment(control_points[i], control_points[i+1])
i += 2
# Calculate control points & save to file.
else:
curve.Curve.__init__(self, True, True, 1.0, 0.0)
with open(filename, "w") as fp:
for i in range(len(control_points)-1):
self.addSegment(control_points[i], control_points[i+1])
saveControlPoint(fp, control_points[i])
saveControlPoint(fp, control_points[i+1])
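# Note on caching: the first time a KnotCurve is built for a given file name it
# solves the curve from the raw control points and writes them to that file
# (sbk_curve1.txt / sbk_curve2.txt next to this module); later runs reload the
# file instead of re-solving.  Delete the files to force a recomputation.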
#
# Testing
#
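# Running this module directly is a visual check: it samples the knot at 100
# points and draws a short red/green/blue triad (the local x/y/z axes from
# getMatrix()) at each point, so the orientation convention described in the
# SheetBendKnot docstring can be inspected in a matplotlib 3-D plot.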
if (__name__ == "__main__"):
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
knot = SBKnot(1, 10)
fig = plt.figure()
    axis = fig.add_subplot(111, projection='3d')
MAX = 10
for direction in (-1, 1):
for point in numpy.diag(direction * MAX * numpy.array([1,1,1])):
axis.plot([point[0]], [point[1]], [point[2]], 'w')
if 0:
d = numpy.linspace(0, knot.length, 50)
x = numpy.zeros(d.size)
y = numpy.zeros(d.size)
z = numpy.zeros(d.size)
for i in range(d.size):
            m = knot.getMatrix(d[i])
            x[i], y[i], z[i] = m[0, 3], m[1, 3], m[2, 3]
axis.plot(x, y, z)
#axis.scatter(x, y, z)
if 1:
vlen = 0.5
#d = numpy.linspace(0, 20.0 + belt.getLength(), 40) - 10.0
d = numpy.linspace(0, knot.getLength(), 100)
for i in range(d.size):
m = knot.getMatrix(d[i])
x = m[0,3]
y = m[1,3]
z = m[2,3]
vx = numpy.dot(m, numpy.array([vlen, 0, 0, 1]))
vy = numpy.dot(m, numpy.array([0, vlen, 0, 1]))
vz = numpy.dot(m, numpy.array([0, 0, vlen, 1]))
for elt in [[vx, "red"], [vy, "green"], [vz, "blue"]]:
axis.plot([x, elt[0][0]],
[y, elt[0][1]],
[z, elt[0][2]],
color = elt[1])
plt.show()
| mit |