repo_name | path | copies | size | content | license
---|---|---|---|---|---|
winklerand/pandas | pandas/tests/sparse/test_reshape.py | 12 | 1088 | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.fixture
def sparse_df():
return pd.SparseDataFrame({0: {0: 1}, 1: {1: 1}, 2: {2: 1}}) # eye
@pytest.fixture
def multi_index3():
return pd.MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)])
def test_sparse_frame_stack(sparse_df, multi_index3):
ss = sparse_df.stack()
expected = pd.SparseSeries(np.ones(3), index=multi_index3)
tm.assert_sp_series_equal(ss, expected)
def test_sparse_frame_unstack(sparse_df):
mi = pd.MultiIndex.from_tuples([(0, 0), (1, 0), (1, 2)])
sparse_df.index = mi
arr = np.array([[1, np.nan, np.nan],
[np.nan, 1, np.nan],
[np.nan, np.nan, 1]])
unstacked_df = pd.DataFrame(arr, index=mi).unstack()
unstacked_sdf = sparse_df.unstack()
tm.assert_numpy_array_equal(unstacked_df.values, unstacked_sdf.values)
def test_sparse_series_unstack(sparse_df, multi_index3):
frame = pd.SparseSeries(np.ones(3), index=multi_index3).unstack()
tm.assert_sp_frame_equal(frame, sparse_df)
| bsd-3-clause |
gfyoung/pandas | pandas/tests/indexes/multi/test_join.py | 2 | 3799 | import numpy as np
import pytest
import pandas as pd
from pandas import Index, MultiIndex
import pandas._testing as tm
@pytest.mark.parametrize(
"other", [Index(["three", "one", "two"]), Index(["one"]), Index(["one", "three"])]
)
def test_join_level(idx, other, join_type):
join_index, lidx, ridx = other.join(
idx, how=join_type, level="second", return_indexers=True
)
exp_level = other.join(idx.levels[1], how=join_type)
assert join_index.levels[0].equals(idx.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array([x[1] in exp_level for x in idx], dtype=bool)
exp_values = idx.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ("outer", "inner"):
join_index2, ridx2, lidx2 = idx.join(
other, how=join_type, level="second", return_indexers=True
)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(idx):
# some corner cases
index = Index(["three", "one", "two"])
result = index.join(idx, level="second")
assert isinstance(result, MultiIndex)
with pytest.raises(TypeError, match="Join.*MultiIndex.*ambiguous"):
idx.join(idx, level=1)
def test_join_self(idx, join_type):
joined = idx.join(idx, how=join_type)
tm.assert_index_equal(joined, idx)
def test_join_multi():
# GH 10665
midx = pd.MultiIndex.from_product([np.arange(4), np.arange(4)], names=["a", "b"])
idx = Index([1, 2, 5], name="b")
# inner
jidx, lidx, ridx = midx.join(idx, how="inner", return_indexers=True)
exp_idx = pd.MultiIndex.from_product([np.arange(4), [1, 2]], names=["a", "b"])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how="inner", return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how="left", return_indexers=True)
exp_ridx = np.array(
[-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1], dtype=np.intp
)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how="right", return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self_unique(idx, join_type):
if idx.is_unique:
joined = idx.join(idx, how=join_type)
assert (idx == joined).all()
def test_join_multi_wrong_order():
# GH 25760
# GH 28956
midx1 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"])
midx2 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=["b", "a"])
join_idx, lidx, ridx = midx1.join(midx2, return_indexers=True)
exp_ridx = np.array([-1, -1, -1, -1], dtype=np.intp)
tm.assert_index_equal(midx1, join_idx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_multi_return_indexers():
# GH 34074
midx1 = pd.MultiIndex.from_product([[1, 2], [3, 4], [5, 6]], names=["a", "b", "c"])
midx2 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"])
result = midx1.join(midx2, return_indexers=False)
tm.assert_index_equal(result, midx1)
| bsd-3-clause |
lzamparo/SdA_reduce | utils/parse_gb_vs_relu_data_files.py | 1 | 3187 | """ Process all the model finetuning ReLU vs GB test output files.
Fine-tuning file names look like this: finetune_sda_<model_arch>.2014-06-18.23:25:20.199112
The average training error for each epoch and batch is reported:
e.g epoch 50, minibatch 2620/2620, training error 124.029259
The validation error over all validation batches is reported at the end of each epoch:
e.g epoch 50, minibatch 2620/2620, validation error 169.730011
We care only about the validation error over the epochs.
Each layer transition is marked by the line: Pickling the model..."""
import sys, re, os
import numpy as np
import pandas as pd
from collections import OrderedDict
# Extract the model name from each filename.
def extract_model_and_param(regex,filename):
match = regex.match(filename)
if match is not None:
return match.groups()
# Extract the layer and cost from a line
def parse_line(line,data_regex):
match = data_regex.match(line)
if match is not None:
return match.groups()
else:
return (None, None, None, None, None)
input_dir = '/data/sda_output_data/relu_vs_gb'
# compile a regex to extract the model from a given filename
model_and_param = re.compile("finetune_sda_([\d_]+).[\w]+\.*")
data_regex = re.compile("epoch ([\d]+)\, minibatch ([\d]+)\/([\d]+)\, ([a-z]+) error ([\d.]+)")
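# A minimal sketch of the expected match (the sample line below is hypothetical, taken from the
# log format described in the module docstring, not from an actual output file):
# parse_line("epoch 50, minibatch 2620/2620, validation error 169.730011", data_regex)
# -> ('50', '2620', '2620', 'validation', '169.730011')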
# Store the contents of each file as a DataFrame, add it to the hyperparam_dfs list.
data_files = []
print "...Processing files"
currdir = os.getcwd()
# for each file:
for group in ["3_layers","5_layers"]:
for layertype in ["gb","relu"]:
# read a list of all files in the directory that match model output files
os.chdir(os.path.join(input_dir,group,layertype))
model_files = os.listdir(".")
for f in model_files:
validation_model = []
epoch_list = []
if not f.startswith("finetune_sda"):
continue
f_model = extract_model_and_param(model_and_param, f)
infile = open(f, 'r')
for line in infile:
if not line.startswith("epoch"):
continue
(epoch, mb_index, mb_total, phase, err) = parse_line(line,data_regex)
if epoch is not None and phase == 'validation':
epoch_list.append(int(epoch))
validation_model.append(float(err))
infile.close()
# build the df, store in list
model_list = [f_model[0] for i in xrange(len(validation_model))]
group_list = [group for i in xrange(len(validation_model))]
layer_list = [layertype for i in xrange(len(validation_model))]
f_dict = {"model": model_list, "group": group_list, "layer": layer_list, "score": validation_model, "epoch": epoch_list}
data_files.append(pd.DataFrame(data=f_dict))
print "...Done"
print "...rbinding DataFrames"
master_df = data_files[0]
for i in xrange(1,len(data_files)):
master_df = master_df.append(data_files[i])
print "...Done"
os.chdir(input_dir)
master_df.to_csv(path_or_buf="both_models.csv",index=False)
| bsd-3-clause |
hainm/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters gets
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
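# A minimal usage sketch (hypothetical arguments, not part of the original example):
# uniform_labelings_scores(metrics.adjusted_rand_score, n_samples=50,
#                          n_clusters_range=[2, 5, 10])
# returns a (3, 5) array: ARI for n_runs=5 pairs of random labelings at each cluster count.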
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
kostyfisik/onza-fdtd | scripts/test-parallel/test-parallel.py | 1 | 27160 | #! /usr/bin/python
# This script performs efficiency evaluation of mpi-based programs and
# outputs the optimal mpirun parameters for a chosen configuration.
# Specifies a vector of defined size filled with zeroes
def GetVector (vector, sz):
if len(vector) == 0:
for i in range(sz):
vector.append(0)
# Specifies a matrix of defined sizes filled with zeroes
def GetMatrix (matrix, sz1, sz2):
if len(matrix) == 0:
for i in range(sz1):
matrix.append([])
for j in range(sz2):
matrix[i].append(0.0)
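# Illustrative sketch (not in the original script): GetMatrix mutates its first argument in place,
# m = []
# GetMatrix(m, 2, 3)  # m is now [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]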
# Starts the file, which name is the first command line parameter
def Start (path) :
import os
os.system(path[0])
# Measures time spent on processing file
def MeasureTime (statement) :
if statement[-1] == ')': # processes any function from main
counter = 2
while statement[-counter] != '(':
counter = counter + 1
cnt = counter - 1
setup = "from __main__ import " + statement[:-counter] + ", " + statement[-cnt:-1]
return timeit.timeit(stmt = statement, setup = setup, number = 1)
else: # processes any user-defined operation
return timeit.timeit(stmt = statement, number = 1)
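# Hedged illustration of the setup string built above (mirrors the slicing logic; the statement
# value is just an example): for statement = "Start(path)" the function imports both the callable
# and its argument, i.e. setup = "from __main__ import Start, path".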
# Returns the optimal number of averages based on predefined 3 sigma criteria
def GetNumber (statement):
number = 3
mean = 0
Take = []
for i in range(number):
Take.append(MeasureTime(statement))
mean = mean + Take[i]
mean = mean / number
sigma2 = 0
for i in range(number):
sigma2 = sigma2 + pow(mean - Take[i], 2)
sigma2 = sigma2 / number
if abs(MeasureTime(statement) - mean) < 3 * math.sqrt(sigma2): # 3-sigma criterion
return number
else:
return number + 2
# Runs MeasureTime() several times to calculate the mean time for file processing
def GetMean (statement, number) :
mean = 0
for i in range (number):
mean = mean + MeasureTime(statement)
return mean / number
# Prints mean time for file processing
def PrintMean (path, mean, number) :
print " Timing file:" + path
print " Mean = ", mean
print " Averaging number = ", number
# Runs the file with preset parameters and measures means for performance times
def ParametricRun (path, binary_file, config_file, max_proc_number, number, Means) :
for i in range (max_proc_number[0]):
path[0] = "mpirun -n " + '%d' % (i + 1) + " " + binary_file + " " + config_file
stmt = "Start(path)"
Means[i] = GetMean(stmt, number)
# Runs the file with preset parameters and measures means for performance times in case of multiple nodes
def ParametricRunNodes(path, binary_file, config_file, nodes_number, max_proc_number, number, Nodes, Procs, Means) :
N = [] # Nodes
n = [] # processes
means = [] # times
# !!!!!!!!!!!!!more common analysis required
for i in 1, nodes_number[0] / 4, nodes_number[0] / 2, 3 * nodes_number[0] / 4, nodes_number[0]:
N.append(i)
#!!!!!!!!!!!more common analysis required
for j in i, i * max_proc_number[0] / 4, i * max_proc_number[0] / 2, i * 3 * max_proc_number[0] / 4, i * max_proc_number[0]:
n.append(j)
path[0] = "salloc -N " + '%d' % (i) + " -n " + '%d' % (j) + " -p max1day " + "mpirun --bind-to-core ./" + binary_file + " " + config_file
# print path[0]
stmt = "Start(path)"
# means.append(10.0 / (1 + 0.05 * i + 0.01 * j))
means.append(GetMean(stmt, number))
GetMatrix(Means, len(N), len(n) / 5)
GetVector(Procs, len(n))
GetVector(Nodes, len(N))
for i in range(0, len(n)):
Procs[i] = n[i]
for i in range(0, len(N)):
Nodes[i] = N[i]
for j in range(0, len(n) / 5):
Means[i][j] = means[j + i * len(n) / 5 ]
# Get optimal number of mpi processes for single node
def GetOptimal (opt_n, Means):
opt_n[0] = 1
minimum = Means[0]
for i in range(len(Means)):
if Means[i] < minimum:
minimum = Means[i]
opt_n[0] = i + 1
# Get optimal number of processes for highest efficiency
def GetOptimalEfficiency (opt_n, Efficiency):
opt_n[0] = 1
maximum = Efficiency[1]
for i in range(1, len(Efficiency)):
if Efficiency[i] >= maximum:
maximum = Efficiency[i]
opt_n[0] = i + 1
# Sorts Means to get optimal parameters for multiple nodes
def GetOptimalNodes (opt_N, opt_n, Nodes, Procs, Means) :
opt_n[0] = 1
opt_N[0] = 1
minimum = Means[0][0]
for i in range(len(Nodes)):
for j in range(len(Procs) / 5):
if Means[i][j] <= minimum:
minimum = Means[i][j]
opt_n[0] = Nodes[i] * Procs[j]
opt_N[0] = Nodes[i]
# Sorts Means to get optimal parameters for multiple nodes Efficiency
def GetOptimalEfficiencyNodes (opt_N, opt_n, Nodes, Procs, Efficiency) :
opt_n[0] = 1
opt_N[0] = 1
maximum = Efficiency[0][1]
for i in range(len(Nodes)):
for j in range(len(Procs) / 5):
if ((i + j)!= 0):
if Efficiency[i][j] >= maximum:
maximum = Efficiency[i][j]
opt_n[0] = Nodes[i] * Procs[j]
opt_N[0] = Nodes[i]
# Displays optimal number of mpi processes for set config
def DisplayOptimals (opt_N, opt_n, goal, binary_file, config_file):
if opt_N[0] == 1:
print "\n>> " + goal + " check:"
print ("\n>> Optimal number of processes for " + binary_file + " = " + '%d' % (opt_n[0]) + ", single node")
else:
print "\n>> " + goal + " check:"
print ("\n>> Optimal number of processes for " + sys.argv[1] + " = " + '%d' % (opt_n[0]) + ", " + '%d' % (opt_N[0]) + " nodes")
# Plots the performance on different number of mpi processes and displays it
def ShowPlot (x, y) :
scatter(x, y, c='b', marker='o', linewidth=5.0)
pylab.show()
# Makes and saves plots displaying performance data
def MakeAndSavePlots (x,y1,y2,y3):
scatter(x, y1, c='r', marker='o', linewidth=5.0)
matplotlib.pyplot.ylabel('Mean time, s')
matplotlib.pyplot.xlabel('Number of mpi processes')
savefig("exec_1_mean_time.png", dpi=200)
plt.clf()
scatter(x, y2, c='r', marker='o', linewidth=5.0)
matplotlib.pyplot.ylabel('Acceleration')
matplotlib.pyplot.xlabel('Number of mpi processes')
savefig("exec_2_acceleration.png", dpi=200)
plt.clf()
scatter(x, y3, c='r', marker='o', linewidth=5.0)
matplotlib.pyplot.ylabel('Efficiency, %')
matplotlib.pyplot.xlabel('Number of mpi processes')
savefig("exec_3_efficiency.png", dpi=200)
# Creates performance plots for multiple node case
def MakeAndSavePlotsNodes (x, y1, y2, y3):
my1 = []
my2 = []
my3 = []
GetVector(my1, len(x))
GetVector(my2, len(x))
GetVector(my3, len(x))
for i in range(len(x) / 5):
for j in range(len(x) / 5):
my1[i * len(x) / 5 + j] = y1[i][j]
my2[i * len(x) / 5 + j] = y2[i][j]
my3[i * len(x) / 5 + j] = y3[i][j]
scatter(x, my1, c='r', marker='o', linewidth=5.0)
matplotlib.pyplot.ylabel('Mean time, s')
matplotlib.pyplot.xlabel('Number of mpi processes')
savefig("exec_1_mean_time.png", dpi=200)
plt.clf()
scatter(x, my2, c='r', marker='o', linewidth=5.0)
matplotlib.pyplot.ylabel('Acceleration')
matplotlib.pyplot.xlabel('Number of mpi processes')
savefig("exec_2_acceleration.png", dpi=200)
plt.clf()
scatter(x, my3, c='r', marker='o', linewidth=5.0)
matplotlib.pyplot.ylabel('Efficiency, %')
matplotlib.pyplot.xlabel('Number of mpi processes')
savefig("exec_3_efficiency.png", dpi=200)
# Get acceleration and efficiency (in %) of file performance for single node
def GetAccelerationAndEfficiency (Means, Acceleration, Efficiency) :
GetVector(Acceleration, len(Means))
GetVector(Efficiency, len(Means))
Acceleration[0] = 1
Efficiency[0] = 100
for i in range(1,len(Means)):
Acceleration[i] = Means[0] / Means[i]
Efficiency[i] = 100 * Acceleration[i] / (i + 1)
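# Worked sketch with hypothetical timings: Means = [10.0, 6.0, 4.0] gives
# Acceleration = [1, 1.67, 2.5] (Means[0] / Means[i]) and
# Efficiency = [100, 83.3, 83.3] percent (100 * Acceleration[i] / (i + 1)).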
# Get acceleration and efficiency (in %) of file performance for multiple nodes
def GetAccelerationAndEfficiencyNodes (Nodes, Procs, Means, Acceleration, Efficiency) :
GetMatrix(Acceleration, len(Nodes), len(Procs) / 5)
GetMatrix(Efficiency, len(Nodes), len(Procs) / 5)
for i in range(0, len(Nodes)):
for j in range(0, len(Procs) / 5):
Acceleration[i][j] = float(Means[0][0] / Means[i][j])
Efficiency[i][j] = 100.0 * Acceleration[i][j] / (Procs[j + i * len(Procs) / 5])
# Write the obtained performance data into a txt file
def WriteData (nodes_number, Means, Acceleration, Efficiency):
f = open('performance_data.txt', 'w')
f.write('# Performance results for ' + sys.argv[1] + ':\n\n')
f.write('# Nodes\tProcesses\tMean time, s\tAcceleration\tEfficiency, %\n')
for i in range(len(Means)):
f.write('%d' % (nodes_number[0]) + '\t' + '%d' % (i + 1) + '\t\t' + '%f' % (Means[i]) + '\t' + '%.3f' % (Acceleration[i]) + '\t\t' + '%.2f' % (Efficiency[i]) + '\n')
f.close()
# Write the obtained performance data into a txt file for multiple nodes
def WriteDataNodes (binary_file, config_file, Nodes, Procs, Means, Acceleration, Efficiency):
f = open('performance_' + config_file + '.dat', 'w')
f.write('# Performance results for ' + binary_file + ' with ' + config_file + ' :\n\n')
f.write('# Nodes\tProcesses\tMean time, s\tAcceleration\tEfficiency, %\n')
for i in range(len(Nodes)):
for j in range(len(Procs) / 5):
f.write('%d' % (Nodes[i]) + '\t' + '%d' % Procs[j + i * len(Procs) / 5] + '\t\t' + '%f' % (Means[i][j]) + '\t' + '%.3f' % (Acceleration[i][j]) + '\t\t' + '%.2f' % (Efficiency[i][j]) + '\n')
f.write('\n\n')
f.close()
def WriteOptimalsNodes (binary_file, config_file, optimal_nodes, optimal_procs, goal):
f = open ('optimal_parameters.par', 'a')
f.write('# For ' + config_file + ', ' + goal + ' check:\n# Nodes\tProcesses\n')
f.write('%d' % (optimal_nodes) + '\t' + '%d' % (optimal_procs) + '\n')
f.close()
# Printing the final message of the program
def PrintFinalMessage ():
print "\n>> Performance data successfully obtained!"
print "\n>> .ps pictures and performance data .dat files can be found in ~/bin directory.\n"
# Getting the host name to recognize predefined machines
def GetHost ():
return socket.gethostname()
# Search for current host in known hosts list with predefined parameters
def IsHostKnown (hostname, nodes_number, max_procs_number):
if hostname == "dmmrkovich-birzha":
nodes_number[0] = 1
max_procs_number[0] = 4
print "\n>> Known host " + hostname + ", using " + '%d' % (nodes_number[0]) + " nodes with max. " + '%d' % (max_procs_number[0]) + " mpi processes."
elif hostname == "head.phoif.ifmo.ru":
nodes_number[0] = 16
max_procs_number[0] = 8
print "\n>> Known host " + hostname + ", using " + '%d' % (nodes_number[0]) + " nodes with max. " + '%d' % (max_procs_number[0]) + " mpi processes."
elif hostname == "debian":
nodes_number[0] = 1
max_procs_number[0] = 2
print "\n>> Known host " + hostname + ", using " + '%d' % (nodes_number[0]) + " nodes with max. " + '%d' % (max_procs_number[0]) + " mpi processes."
elif hostname == "tig-laptop2":
nodes_number[0] = 1
max_procs_number[0] = 2
print "\n>> Known host " + hostname + ", using " + '%d' % (nodes_number[0]) + " nodes with max. " + '%d' % (max_procs_number[0]) + " mpi processes."
elif hostname == "deb00":
nodes_number[0] = 1
max_procs_number[0] = 4
print "\n>> Known host " + hostname + ", using " + '%d' % (nodes_number[0]) + " nodes with max. " + '%d' % (max_procs_number[0]) + " mpi processes."
#################### Template for adding your host into the list of known hosts ####################################
## elif hostname == "your hostname": ##
## nodes_number[0] = "your nodes number" ##
## max_procs_number[0] = "your procs number" ##
######################################################################################################################
else:
print "\n>> Your hostname is unknown. Please, add your host to list of known hosts in function 'IsHostKnown'\n\n>> or just specify input parameters manually at the moment\n\n>> Enter the number of nodes( int numbers, please :) )"
nodes_number[0] = input()
print "\n>> Number of nodes is set to " + '%d' % (nodes_number[0])
print "\n>> Enter the maximal number of mpi_processes( int numbers, please :) )"
max_procs_number[0] = input()
print "\n>> Maximal number of mpi processes is set to " + '%d' % (max_procs_number[0])
# Parses the parameters of the config file and generates scaled copies of the config
def ChangeConfig (config_file, input_parameters):
f = open(config_file, 'r')
content = f.read()
f.close()
i = 0
while content[-i] != 'D':
i += 1
if content[-i] == '\n':
j = i
while content[-j] != ' ':
j += 1
input_parameters.append(content[-j + 1:-i])
t = int(input_parameters[0])
a = int(input_parameters[1])
result = ""
if len(input_parameters) == 5:
C = t * pow(a, 3)
q = 0
a_change = range(4); t_change = range(4); content_change = ["","","",""]
for i in 6, 4, 2, 1:
q += 1
a_change[q-1] = a * i / 8
t_change[q-1] = C / pow(a_change[q-1], 3)
content_change[q-1] = content
content_change[q-1] = content_change[q-1].replace('length_x = ' + '%d' % (a), 'length_x = ' + '%d' % (a_change[q-1]))
content_change[q-1] = content_change[q-1].replace('length_y = ' + '%d' % (a), 'length_y = ' + '%d' % (a_change[q-1]))
content_change[q-1] = content_change[q-1].replace('length_z = ' + '%d' % (a), 'length_z = ' + '%d' % (a_change[q-1]))
content_change[q-1] = content_change[q-1].replace('total_time_steps = ' + '%d' % (t), 'total_time_steps = ' + '%d' % (t_change[q-1]))
f = open('test-parallel-3D_' + '%d' % (q) + '.config', 'w')
f.write(content_change[q-1])
f.close()
return "test-parallel-3D"
elif len(input_parameters) == 4:
C = t * pow(a, 2)
q = 0
a_change = range(4); t_change = range(4); content_change = ["","","",""]
for i in 6, 4, 2, 1:
q += 1
a_change[q-1] = a * i / 8
t_change[q-1] = C / pow(a_change[q-1], 2)
content_change[q-1] = content
content_change[q-1] = content_change[q-1].replace('length_x = ' + '%d' % (a), 'length_x = ' + '%d' % (a_change[q-1]))
content_change[q-1] = content_change[q-1].replace('length_y = ' + '%d' % (a), 'length_y = ' + '%d' % (a_change[q-1]))
content_change[q-1] = content_change[q-1].replace('total_time_steps = ' + '%d' % (t), 'total_time_steps = ' + '%d' % (t_change[q-1]))
f = open('test-parallel-2D_' + '%d' % (q) + '.config', 'w')
f.write(content_change[q-1])
f.close()
return "test-parallel-2D"
elif len(input_parameters) == 3:
C = t * pow(a, 1)
q = 0
a_change = range(4); t_change = range(4); content_change = ["","","",""]
for i in 6, 4, 2, 1:
q += 1
a_change[q-1] = a * i / 8
t_change[q-1] = C / pow(a_change[q-1], 1)
content_change[q-1] = content
content_change[q-1] = content_change[q-1].replace('length_x = ' + '%d' % (a), 'length_x = ' + '%d' % (a_change[q-1]))
content_change[q-1] = content_change[q-1].replace('total_time_steps = ' + '%d' % (t), 'total_time_steps = ' + '%d' % (t_change[q-1]))
f = open('test-parallel-1D_' + '%d' % (q) + '.config', 'w')
f.write(content_change[q-1])
f.close()
return "test-parallel-1D"
else:
print "Failed to read parameters"
def RunConfigs (result, path, nodes_number, max_proc_number, number, Nodes, Procs, Means, Acceleration, Efficiency, opt_n, opt_N, optimal_procs, optimal_nodes):
for q in 1,2,3:
if result == "test-parallel-3D":
ParametricRunNodes(path, binary_file, 'test-parallel-3D_' + '%d' % (q) + '.config', nodes_number, max_proc_number, number, Nodes, Procs, Means)
elif result == "test-parallel-2D":
ParametricRunNodes(path, binary_file, 'test-parallel-2D_' + '%d' % (q) + '.config', nodes_number, max_proc_number, number, Nodes, Procs, Means)
elif result == "test-parallel-1D":
ParametricRunNodes(path, binary_file, 'test-parallel-1D_' + '%d' % (q) + '.config', nodes_number, max_proc_number, number, Nodes, Procs, Means)
GetAccelerationAndEfficiencyNodes(Nodes, Procs, Means, Acceleration, Efficiency)
GetOptimalNodes(opt_N, opt_n, Nodes, Procs, Means)
optimal_procs[q] = opt_n[0]
optimal_nodes[q] = opt_N[0]
if result == "test-parallel-3D":
WriteDataNodes(binary_file, 'test-parallel-3D_' + '%d' % (q) + '.config', Nodes, Procs, Means, Acceleration, Efficiency)
WriteOptimalsNodes(binary_file, 'test-parallel-3D_' + '%d' % (q) + '.config', opt_N[0], opt_n[0], "mean time")
DisplayOptimals(opt_N, opt_n, "Mean time", binary_file, 'test-parallel-3D_' + '%d' % (q) + '.config')
elif result == "test-parallel-2D":
WriteDataNodes(binary_file, 'test-parallel-2D_' + '%d' % (q) + '.config', Nodes, Procs, Means, Acceleration, Efficiency)
WriteOptimalsNodes(binary_file, 'test-parallel-2D_' + '%d' % (q) + '.config', opt_N[0], opt_n[0], "mean time")
DisplayOptimals(opt_N, opt_n, "Mean time", binary_file, 'test-parallel-2D_' + '%d' % (q) + '.config')
elif result == "test-parallel-1D":
WriteDataNodes(binary_file, 'test-parallel-1D_' + '%d' % (q) + '.config', Nodes, Procs, Means, Acceleration, Efficiency)
WriteOptimalsNodes(binary_file, 'test-parallel-1D_' + '%d' % (q) + '.config', opt_N[0], opt_n[0], "mean time")
DisplayOptimals(opt_N, opt_n, "Mean time", binary_file, 'test-parallel-1D_' + '%d' % (q) + '.config')
GetOptimalEfficiencyNodes(opt_N, opt_n, Nodes, Procs, Efficiency)
optimal_procs[q] = opt_n[0]
optimal_nodes[q] = opt_N[0]
if result == "test-parallel-3D":
WriteOptimalsNodes(binary_file, 'test-parallel-3D_' + '%d' % (q) + '.config', opt_N[0], opt_n[0], "efficiency")
DisplayOptimals(opt_N, opt_n, "Efficiency", binary_file, 'test-parallel-3D_' + '%d' % (q) + '.config')
elif result == "test-parallel-2D":
WriteOptimalsNodes(binary_file, 'test-parallel-2D_' + '%d' % (q) + '.config', opt_N[0], opt_n[0], "efficiency")
DisplayOptimals(opt_N, opt_n, "Efficiency", binary_file, 'test-parallel-2D_' + '%d' % (q) + '.config')
elif result == "test-parallel-1D":
WriteOptimalsNodes(binary_file, 'test-parallel-1D_' + '%d' % (q) + '.config', opt_N[0], opt_n[0], "efficiency")
DisplayOptimals(opt_N, opt_n, "Efficiency", binary_file, 'test-parallel-1D_' + '%d' % (q) + '.config')
# Measures mean run times for fixed mpirun parameters across the generated configs
def RunConfigFixed(result, path, binary_file, config_file, N, n, Means, number) :
GetVector(Means, 5)
path[0] = "salloc -N " + '%d' % (N) + " -n " + '%d' % (n) + " -p max1hour " + "mpirun --bind-to-core ./" + binary_file + " " + config_file
stmt = "Start(path)"
Means[0] = GetMean(stmt, number)
for q in 1,2,3,4:
if result == "test-parallel-3D":
path[0] = "salloc -N " + '%d' % (N) + " -n " + '%d' % (n) + " -p max1hour " + "mpirun --bind-to-core ./" + binary_file + " test-parallel-3D_" + '%d' % (q) + '.config'
stmt = "Start(path)"
Means[q] = GetMean(stmt, number)
if result == "test-parallel-2D":
path[0] = "salloc -N " + '%d' % (N) + " -n " + '%d' % (n) + " -p max1hour " + "mpirun --bind-to-core ./" + binary_file + " test-parallel-2D_" + '%d' % (q) + '.config'
stmt = "Start(path)"
Means[q] = GetMean(stmt, number)
if result == "test-parallel-1D":
path[0] = "salloc -N " + '%d' % (N) + " -n " + '%d' % (n) + " -p max1hour " + "mpirun --bind-to-core ./" + binary_file + " test-parallel-1D_" + '%d' % (q) + '.config'
stmt = "Start(path)"
Means[q] = GetMean(stmt, number)
def WriteConfigFixed (result, N, n, input_parameters, config_file, Means):
if result == "test-parallel-3D":
C = int(input_parameters[0]) * pow(int(input_parameters[1]), 3)
f = open("Fixed_N_" + '%d' % (N) + "_n_" + '%d' % (n) + "_performance.fdat", 'w')
f.write("# Peformance results for fixed input parameters N = " + '%d' % (N) + " and n = " + '%d' % (n) + " for " + config_file + "\n")
f.write("#Time steps\tSize\tMeans\n")
for i in range(len(Means)):
if i == 0:
f.write(input_parameters[0] + '\t\t' + input_parameters[1] + '\t' + '%f' % (Means[0]) + '\n')
elif i == 4:
a = int(input_parameters[1]) * 1 / 8
t = C / pow(a, 3)
f.write('%d' % (t) + '\t\t' + '%d' % (a) + '\t' + '%f' % (Means[i]) + '\n')
else:
a = int(input_parameters[1]) * (4 - i) / 4
t = C / pow(a, 3)
f.write('%d' % (t) + '\t\t' + '%d' % (a) + '\t' + '%f' % (Means[i]) + '\n')
f.close()
if __name__ == '__main__':
import sys
import socket
import os
import math
import timeit
import numpy
import math
# import pylab
# import matplotlib
# from matplotlib import mlab
# from pylab import *
################################################################################
## Processing files, that are inside the current folder ##
################################################################################
if len(sys.argv) < 2:
print "\n>> Please enter at least the name of binary to be tested along with script's name."
print "\n>> E.g. $ ./exec_script.py binary_name.bin"
print "\n>> If there is a configuration file to your binary, type"
print "\n>> $ ./exec_script.py binary_name.bin config_file_name.txt"
else:
hostname = GetHost()
nodes_number = [1]
max_proc_number = [1]
IsHostKnown(hostname, nodes_number, max_proc_number)
print "\n>> Now processing... " + sys.argv[1] + " on " + '%d' % (nodes_number[0]) + " node(s) using maximum " + '%d' % (max_proc_number[0]) + " mpi processes"
if len(sys.argv) >= 3:
print "\n>> Using predefined config file " + sys.argv[2]
else:
print "\n>> No predefined config file selected, running binary with default parameters"
if len(sys.argv) == 2:
binary_file = sys.argv[1]
if sys.argv[1] == "onza-fdtd.bin":
config_file = "test-parallel-3D.config"
else:
config_file = ""
else:
binary_file = sys.argv[1]
config_file = sys.argv[2]
path = ["./" + binary_file + " " + config_file]
stmt = "Start(path)"
# number = GetNumber(stmt)
number = 3
if len(sys.argv) == 5:
fixed_node = int(sys.argv[3])
fixed_proc = int(sys.argv[4])
input_parameters = []
print "\n>> Will now start evaluating performance data, using " + '%d' % (number) + " averages, P = 95%"
if nodes_number[0] == 1:
Procs = range(1, max_proc_number[0] + 1)
Means = range(max_proc_number[0])
ParametricRun(path, binary_file, config_file, max_proc_number, number, Means)
optimal = [1]
Acceleration = range(max_proc_number[0])
Efficiency = range(max_proc_number[0])
GetAccelerationAndEfficiency(Means, Acceleration, Efficiency)
GetOptimal(optimal, Means)
DisplayOptimals(nodes_number, optimal, "Mean time", binary_file, config_file)
GetOptimalEfficiency(optimal, Efficiency)
DisplayOptimals(nodes_number, optimal, "Efficiency", binary_file, config_file)
WriteData(nodes_number, Means, Acceleration, Efficiency)
# MakeAndSavePlots(Procs, Means, Acceleration, Efficiency)
PrintFinalMessage()
else:
Nodes = []
Procs = []
Means = []
Acceleration = []
Efficiency = []
opt_N = [1]
opt_n = [1]
if len(sys.argv) <= 3:
print "\n>> Will now start mpirun parameters optimization"
ParametricRunNodes(path, binary_file, config_file, nodes_number, max_proc_number, number, Nodes, Procs, Means)
GetAccelerationAndEfficiencyNodes(Nodes, Procs, Means, Acceleration, Efficiency)
WriteDataNodes(binary_file, config_file, Nodes, Procs, Means, Acceleration, Efficiency)
GetOptimalNodes(opt_N, opt_n, Nodes, Procs, Means)
DisplayOptimals(opt_N, opt_n, "Mean time", binary_file, config_file)
WriteOptimalsNodes(binary_file, config_file, opt_N[0], opt_n[0], "mean time")
GetOptimalEfficiencyNodes(opt_N, opt_n, Nodes, Procs, Efficiency)
DisplayOptimals(opt_N, opt_n, "Efficiency", binary_file, config_file)
WriteOptimalsNodes(binary_file, config_file, opt_N[0], opt_n[0], "efficiency")
if config_file[0:13] == "test-parallel":
if len(sys.argv) == 5:
print "\n>> Will start with fixed mpirun parameters N = " + '%d' % (fixed_node) + " n = " + '%d' % (fixed_proc)
result = ChangeConfig(config_file, input_parameters)
RunConfigFixed(result, path, binary_file, config_file, fixed_node, fixed_proc, Means, number)
WriteConfigFixed (result, fixed_node, fixed_proc, input_parameters, config_file, Means)
else:
print "\n>> Will now start config optimization."
optimal_procs = []
optimal_nodes = []
GetVector(optimal_procs, 4)
GetVector(optimal_nodes, 4)
optimal_procs[0] = opt_n[0]
optimal_nodes[0] = opt_N[0]
result = ChangeConfig(config_file, input_parameters)
RunConfigs (result, path, nodes_number, max_proc_number, number, Nodes, Procs, Means, Acceleration, Efficiency, opt_N, opt_n, optimal_procs, optimal_nodes)
PrintFinalMessage()
################################################################################
## END ##
################################################################################
| gpl-3.0 |
PavelDudarin/sentence-clustering | clusterer_main.py | 1 | 54297 | import time
from time import gmtime, strftime
import random
import re
import math
import pandas as pd
import numpy as np
import pickle
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import collections
import igraph
from typing import List
import sklearn
from sklearn import cluster
from sklearn.metrics.pairwise import cosine_similarity
import skfuzzy as fuzz
from scipy import stats
from pymystem3 import Mystem
from pyaspeller import Word
import gensim
from gensim.models.keyedvectors import KeyedVectors
import hdbscan
from rutermextract import TermExtractor
import experiments_config as exp_conf
import common_units as cu
#one-time download
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt")
try:
nltk.data.find("corpora/stopwords")
except LookupError:
nltk.download("stopwords")
#---------------------------------------------------------------------------
def correctSpelling(line):
stop_sybols = [u'/', u'\\', u'№', u':', u'1', u'2', u'3', u'4' , u'5', u'6', u'7', u'8', u'9', u'0', u'–']
checked_words = []
for word in line.split():
if not any(st in word for st in stop_sybols):
try:
check = Word(word)
if not check.correct and check.spellsafe:
checked_words.append(check.spellsafe.translate({ord(u"-"):" "}))
else:
checked_words.append(word)
except:
pass
return " ".join(checked_words)
def findNounReplacement(stemmer, lemm, pos, word_pos, replace_dict, suffixes):
lemm_abr = stemmer.stem(lemm)
stem_difference = 2
if pos == "VERB":
lemm_abr = re.sub("ся$", "", lemm_abr)
stem_difference = 2
if len(lemm_abr) > 4:
for suff in suffixes:
if re.search(suff+"$", lemm_abr):
lemm_abr = re.sub(suff+"$", "", lemm_abr)
break
if len(lemm_abr) > 3:
for word in word_pos:
if lemm_abr in word:
word_abr = stemmer.stem(word)
if word_pos[word] == "NOUN" and len(word_abr)-len(lemm_abr) < stem_difference:
replace_dict[lemm+"_"+pos] = word+"_"+"NOUN"
break
def addReplacement(lemm, word, lemm_abr, word_abr, word_pos, replace_dict, suffixes):
stem_difference = 2
if word_pos[word] == "VERB":
word_abr = re.sub("ся$", "", word_abr)
stem_difference = 2
for suff in suffixes:
if re.search(suff+"$", word_abr):
word_abr = re.sub(suff+"$", "", word_abr)
break
if len(word_abr)-len(lemm_abr) < stem_difference:
replace_dict[word+"_"+word_pos[word]] = lemm+"_"+"NOUN"
def checkWordList(stemmer, lemm, pos, word_pos, replace_dict):
adj_suff = ['еват','оват','овит','енск','инск','тельн','енн','онн','он','аст','ист','лив','чив','чат','ивн',
'ск', 'ал','ел','ан','ян','ат','ев','ов','ен','ив','ин','ит','шн','уч','юч','яч','к','л','н']
verb_suff = ['ствова','ова','ева','чива','ива','ти','нича','ка','а','я','е','и','ну']
if lemm not in word_pos:
if pos == "ADJ":
findNounReplacement(stemmer, lemm, pos, word_pos, replace_dict, adj_suff)
if pos == "VERB":
findNounReplacement(stemmer, lemm, pos, word_pos, replace_dict, verb_suff)
if pos == "NOUN":
lemm_abr = stemmer.stem(lemm)
lemm_abr = re.sub("(ан)|(ен)$", "", lemm_abr)
if len(lemm_abr) > 3:
for word in word_pos:
if lemm_abr in word:
word_abr = stemmer.stem(word)
if word_pos[word] == "ADJ":
addReplacement(lemm, word, lemm_abr, word_abr, word_pos, replace_dict, adj_suff)
elif word_pos[word] == "VERB":
addReplacement(lemm, word, lemm_abr, word_abr, word_pos, replace_dict, verb_suff)
word_pos[lemm] = pos
def lemmatize(line, m, stop_list, tags_dict, stemmer, word_pos, replace_dict):
ana = m.analyze(line)
lemmas = []
for item in ana:
if len(item) > 1 and len(item["analysis"]) > 0:
lemm = item["analysis"][0]["lex"]
if len(lemm) > 1 and lemm not in stop_list:
pos = item["analysis"][0]["gr"].split("=")[0].split(",")[0]
if pos in tags_dict:
pos = tags_dict[pos]
checkWordList(stemmer, lemm, pos, word_pos, replace_dict)
lemmas.append(lemm+"_"+pos)
return lemmas
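# Illustrative sketch (hypothetical input and output; the exact lemmas and POS tags depend on the
# Mystem analyzer and the tag dictionary loaded at runtime):
# lemmatize("развитие городов", m, stop_list, tags_dict, stemmer, word_pos, replace_dict)
# would typically return something like ["развитие_NOUN", "город_NOUN"].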
def createIndicatorsDataset(m, stemmer, max_inds_count, stopwords_file, in_file, tags_dict_file, ind_file):
print("Start creating Indictors DataSet")
#Загрузка даных
xl_file = pd.ExcelFile(in_file)
ds = xl_file.parse("Лист1")
#clear symbols
# эти символы разбивают слово на два
chars_to_remove = [u'«', u'»', u'!', u'<', u'>', u'?', u',', u'.', u'-', u'(', u')', u'[', u']', u'"', u'•', u'%', u';']
dd = {ord(c):" " for c in chars_to_remove}
dd[ord(u"¬")] = ""
# Загружаем стоп слова
xl_file = pd.ExcelFile(stopwords_file)
ds_stop_words = xl_file.parse("Лист1")
stop_list = set()
for x in ds_stop_words.STOP_WORDS.str.lower().tolist():
if " " in x:
for w in x.split():
stop_list.add(w)
else:
stop_list.add(x)
for w in stopwords.words("russian"):
stop_list.add(w)
print("Кол-во стоп слов: ",len(stop_list))
tags_file = open(tags_dict_file)
tags_dict = { line.split()[0] : line.split()[1] for line in tags_file }
tags_file.close()
print("Кол-во элементов в tags_dict: ", len(tags_dict))
# List of indicators
inds = []
inds_dict = {}
#i = 0
word_pos = {}
replace_dict = {}
inds_list = [(row[0], str(row[1]).upper()) for row in ds[["IND_ID","IND_NAME"]].values.tolist()]
#inds_list = ds.IND_NAME.str.upper().tolist()
if max_inds_count < len(inds_list)*0.9:
sample_list = random.sample(inds_list, max_inds_count)
else:
sample_list = inds_list[0:max_inds_count]
print("Кол-во элементов в sample_list: ", len(sample_list))
for i, line in sample_list:
print('Process', i, "элемент")
#TODO выводить проценты загрузки
if type(line) == str and len(line) > 0:
new_line = line.translate(dd)
new_line = correctSpelling(new_line).upper()
if new_line in inds_dict:
inds.append([i, new_line, inds_dict[new_line], {}])
else:
lemmas = lemmatize(new_line, m, stop_list, tags_dict, stemmer, word_pos, replace_dict)
inds.append([i, new_line, lemmas, {}])
inds_dict[new_line] = lemmas
#i += 1
print("Words to replace with nouns >>", len(replace_dict))
for i in range(len(inds)):
for j in range(len(inds[i][2])):
if inds[i][2][j] in replace_dict:
inds[i][2][j] = replace_dict[inds[i][2][j]]
print("indicators >>", inds[0:10])
output = open(ind_file, "wb")
pickle.dump(inds, output)
output.close()
print("Indicators saved in ", ind_file)
print("\n--------------------------------------------------------\n")
#----------------------------------------------------------------------------------------
def createWordsDataset(m, ind_file, vectors_file,
words_freq_file, words_ds_file, words_dict_file):
print("Start creating Words DataSet")
pkl_file = open(ind_file, "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
print("Inds", len(inds))
words_counter = collections.Counter([w for ind in inds for w in ind[2]]) # words with their occurrence counts
# now normalize each word's importance
values = [int(v) for k,v in dict(words_counter).items()]
cntr_mean = np.mean(values)
cntr_std = np.std(values)
print("cntr_mean >> ", cntr_mean)
print("cntr_std >> ", cntr_std)
#words_dict = [ [k, (v-cntr_mean)/cntr_std] for k,v in dict(words_counter).items()]
words_dict = [ [k, (v-cntr_mean)/cntr_std ] for k,v in dict(words_counter).items()
if v <= (cntr_mean+cntr_std)*5]
print("Words cnt: ", len(words_dict))
print("Words (normalized) [0:10]>> ", words_dict[0:10])
# write all words with their frequencies to a separate text file
df = pd.DataFrame(words_dict)
df.to_csv(words_freq_file, index=False, header=True)
print("Words with frequencies saved in ", words_freq_file)
print("\n--------------------------------------------------------\n")
words_cnt = len(words_dict)
words_ds = [[0 for j in range(0, words_cnt)] for i in range(0, words_cnt)]
print("Creating words_ds")
# w2v vectors
model = KeyedVectors.load_word2vec_format(vectors_file, binary=True)
print("Vectors loaded")
for i in range(0, words_cnt):
for j in range(0, words_cnt):
if i==j:
words_ds[i][j] = 1
words_ds[j][i] = 1
else:
sim = 0
w1 = words_dict[i][0]
w2 = words_dict[j][0]
if w1 in model.vocab and w2 in model.vocab:
sim = model.similarity(w1, w2)
words_ds[i][j] = sim
words_ds[j][i] = sim
output = open(words_ds_file, "wb")
pickle.dump(words_ds, output)
output.close()
print("Words DS saved in ", words_ds_file)
output = open(words_dict_file, "wb")
pickle.dump(words_dict, output)
output.close()
print("Words dictionary saved in ", words_dict_file)
#----------------------------------------------------------------------------------------
def calcWeigth(vw1, vw2, ew) -> float:
return ew
def createGraph(words_ds, words, edge_treshold, graph_file_name):
graph_ver_cnt = len(words)
g = igraph.Graph()
g.add_vertices(graph_ver_cnt)
g.vs["name"] = [k[0] for k in words]
g.vs["norm_weight"] = [k[1] for k in words]
edgs = [ (i,j) for i in range(0, graph_ver_cnt) for j in range(0, graph_ver_cnt)
if i>j and words_ds[i][j] >= edge_treshold]
g.add_edges(edgs)
g.es["weight"] = [ calcWeigth(words[i][1], words[j][1], words_ds[i][j])
for i in range(0, graph_ver_cnt)
for j in range(0, graph_ver_cnt) if i>j and words_ds[i][j] > edge_treshold
]
# delete isolated vertices
exclude_list = []
exclude_vs = []
for i in reversed(range(0,graph_ver_cnt)):
if g.vs[i].degree() == 0:
exclude_list.append(g.vs[i]["name"])
exclude_vs.append(i)
g.delete_vertices(exclude_vs)
print("Excluded cnt: ", len(exclude_list))
print("Exclude list [0:50]: ", exclude_list[0:100])
g.write_graphml(graph_file_name)
print("Graph "+graph_file_name+" created.")
igraph.summary(g)
return g
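# Minimal usage sketch (hypothetical toy inputs, not from the pipeline): for
# words = [["дом_NOUN", 0.2], ["город_NOUN", 0.5]] and a 2x2 similarity matrix with
# words_ds[1][0] = 0.6, calling createGraph(words_ds, words, 0.55, "toy.graphml")
# produces a two-vertex graph with a single edge of weight 0.6 and writes it to disk.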
def constructGraph(start_th, words_ds_file, words_dict_file, graph_file_name_pref):
pkl_file = open(words_ds_file, "rb")
words_ds = pickle.load(pkl_file)
pkl_file.close()
print("words_ds len: ", len(words_ds))
pkl_file = open(words_dict_file, "rb")
words = pickle.load(pkl_file)
pkl_file.close()
print("words len: ", len(words))
for th in [start_th]:
graph_main = createGraph(words_ds, words, th,
graph_file_name_pref+str(round(th*100))+".graphml"
)
#----------------------------------------------------------------------------------------
# Hierarchical clustering based on fuzzy connectedness
def deleteEdges(graph: igraph.Graph, edge_threshold: float):
for e in graph.es:
if e["weight"] < edge_threshold:
graph.delete_edges(e.index)
def getSubgraphWeight(graph: igraph.Graph) -> float:
norm_weight = [1 if v <= 0 else v+1 for v in graph.vs["norm_weight"]]
weight = sum(norm_weight[i] for i in range(0, len(graph.vs)) )
return weight
def getSubgraphKeywords(graph: igraph.Graph, keyword_cnt: int) -> List[str]:
degree = graph.degree()
norm_weight = [1 if v <= 0 else v+1 for v in graph.vs["norm_weight"]]
name = graph.vs["name"]
dict = {name[i]: degree[i]*norm_weight[i] for i in range(0, len(graph.vs)) }
return sorted(dict, key=dict.get, reverse=True)[0:min(keyword_cnt, len(graph.vs))]
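# Descriptive note (added): a vertex's keyword score is its degree multiplied by its shifted
# normalized frequency (1 for non-positive weights, weight + 1 otherwise), so frequent and
# well-connected words rank first; the top keyword_cnt vertex names are returned.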
# returns the cut graph; there is always more than one subgraph,
# except when cutting would violate the constraints
# tries to obtain a partition with the given number of significant subgraphs (those that will become nodes)
def cutGraph(pGraph: igraph.Graph, edge_threshold: float,
edge_th_step: float,
max_edge_threshold: float,
avg_bouquet: int,
min_subgraph_coeff: float = 10 #coefficient above which a subgraph is added to the hierarchy
) -> [[igraph.Graph], float]:
prev_sgs_cnt = 1
prev_sgs = [pGraph]
sgs = [pGraph]
#while further splitting is possible
while (edge_threshold<1) and (edge_threshold < max_edge_threshold):
deleteEdges(pGraph, edge_threshold)
comps = pGraph.components(mode="STRONG") #Returns:a VertexClustering object
sgs = comps.subgraphs()
sgs_cnt = sum(1 if (getSubgraphWeight(sg) >= min_subgraph_coeff) else 0 for sg in sgs)
#is this partition good enough for us?
#if the partition is acceptable, exit the loop
if (prev_sgs_cnt == 1) and (sgs_cnt >= avg_bouquet):
break
else:
# the only case in which splitting continues: the target average has not been reached yet
if (prev_sgs_cnt == 1) and (sgs_cnt < avg_bouquet):
prev_sgs_cnt = sgs_cnt
prev_sgs = sgs
else:
# if the deviation from the target count was smaller on the previous step, return that partition
if abs(prev_sgs_cnt - avg_bouquet) < abs(sgs_cnt - avg_bouquet):
sgs = prev_sgs
break
# the ideal number of subgraphs has been reached
else:
break
#threshold step for the next split
edge_threshold += edge_th_step
return [sgs, edge_threshold]
def addLayer(hier_graph: igraph.Graph, graph: igraph.Graph, parent_vtx: str,
layer_n: int,
edge_threshold: float, #threshold at which to start splitting the graph (for the first call this is the current level)
edge_th_step: float,
max_edge_threshold: float,
max_layer_cnt: int,
min_subgraph_coeff: float = 10, #coefficient above which a subgraph is added to the hierarchy
keywords_cnt: int = 10, #number of keywords per node
keyword_coeff: float = 100, #significance multiplier for the first keyword from each subgraph
avg_bouquet: int = 4
) -> {str: int}:
#cut the graph so that it yields the required number of subgraphs (a single subgraph is returned only when no further cut is possible)
edge_threshold = edge_threshold
if layer_n > 1: #the first layer arrives already cut, so the ELSE branch handles it
[sgs, edge_threshold] = cutGraph(graph, edge_threshold, edge_th_step, max_edge_threshold, avg_bouquet, min_subgraph_coeff)
else: #the first layer is a special case
comps = graph.components(mode="STRONG") #Returns:a VertexClustering object
sgs = comps.subgraphs()
if len(sgs) == 1: #if the initial graph is not split yet, it has to be split as well
[sgs, edge_threshold] = cutGraph(graph, edge_threshold+edge_th_step, edge_th_step, max_edge_threshold, avg_bouquet, min_subgraph_coeff)
#set up the initial variables
keywords = {}
prnt_index = len(hier_graph.vs)-1
node_cnt = 1 # counting from zero would multiply by zero and many keywords would be lost
#iterate over all subgraphs
for sg in sgs:
sg_keywords = {}
# if the weight of this subgraph deserves its own node in the hierarchy;
# note that the graph can no longer collapse into a single vertex, the cutting procedure prevents that
if (getSubgraphWeight(sg) >= min_subgraph_coeff) and (len(sgs) != 1):
# Add vertex
# TODO: replace this naming scheme with a more abstract one once keyword extraction is done
if len(sg.vs) > keywords_cnt:
vrtx_name = "Layer "+str(layer_n)+" "+" ".join(list(random.sample(sg.vs["name"], 3)))
else:
vrtx_name = "Layer "+str(layer_n)+" "+" ".join(list(sg.vs["name"]))
hier_graph.add_vertex(vrtx_name)
sg_vrtx_indx = len(hier_graph.vs)-1
node_cnt += 1
hier_graph.vs["layer"] = ["Layer "+str(edge_threshold) if x is None else x for x in hier_graph.vs["layer"]]
hier_graph.vs["layer_n"] = [edge_threshold if x is None else x for x in hier_graph.vs["layer_n"]]
hier_graph.vs["graph"] = [sg if x is None else x for x in hier_graph.vs["graph"]]
hier_graph.vs["parent_node"] = ["n"+str(prnt_index) if x is None else x for x in hier_graph.vs["parent_node"]]
hier_graph.add_edge(parent_vtx, vrtx_name)
# Recursion
next_edge_threshold = edge_threshold+edge_th_step
#Conditions for entering the recursion:
#a node has been created
#the maximum number of steps has not been reached
if (len(sg.vs)>1) and (layer_n < max_layer_cnt) and (next_edge_threshold<1) and (next_edge_threshold < max_edge_threshold):
sg_keywords = addLayer(hier_graph, sg, vrtx_name, layer_n+1, next_edge_threshold, edge_th_step, max_edge_threshold,
max_layer_cnt, min_subgraph_coeff, keywords_cnt, keyword_coeff, avg_bouquet)
i = 0
for k,v in sg_keywords.items(): #extend the keyword list of the parent vertex
if i == 0:
keywords[k] = v*keyword_coeff
else:
keywords[k] = v
i += 1
else: # we did not recurse, so just extend the parent's keyword list
i = 0
for w in getSubgraphKeywords(sg, keywords_cnt):
if i == 0:
keywords[w] = 1*keyword_coeff
else:
keywords[w] = 1
sg_keywords[w] = 1
i += 1
# For the added node, store its keywords and its number of children
words = " ".join(sg_keywords.keys())
# print(words)
# print(hier_graph.vs["keywords"][0:5])
hier_graph.vs["keywords"] = [words if i == sg_vrtx_indx else hier_graph.vs["keywords"][i]
for i in range(0, len(hier_graph.vs))
]
# print(hier_graph.vs["keywords"][0:5])
hier_graph.vs["child_node_cnt"] = [len(sg.vs()) if i == sg_vrtx_indx else hier_graph.vs["child_node_cnt"][i]
for i in range(0, len(hier_graph.vs))
]
# if the weight of this subgraph does NOT deserve its own node in the hierarchy, just extend the parent's keywords
else:
# just add keywords
i = 0
for w in getSubgraphKeywords(sg, keywords_cnt):
if i == 0:
keywords[w] = 1*keyword_coeff
else:
keywords[w] = 1
i += 1
keword_list = sorted(keywords, key=keywords.get, reverse=True)[0:keywords_cnt] # we already have exactly as many words as needed
return {k: node_cnt for k in keword_list}
def getEdgeStat(pGraph: igraph.Graph) -> [float, float, float]:
if len(pGraph.es["weight"]) == 0:
return [1,1,1]
max_weight = max(w for w in pGraph.es["weight"])
min_weight = min(w for w in pGraph.es["weight"])
avg_weight = sum(w for w in pGraph.es["weight"])/len(pGraph.es["weight"])
return [min_weight, avg_weight, max_weight]
def doGraphHierarchicalClustering(th, max_inds_count, graph_file_name_pref, hier_graph_file):
#parameters
th_start = th # starting level of removed edges
th_step = 0.005 # recursion step for edge removal
th_max = 0.99 # maximum threshold up to which edges are removed
max_depth = 1000 #maximum recursion depth
avg_bouquet = 5 # target number of child nodes in the tree
min_subgraph_coeff = 0.9 #coefficient above which a subgraph is added to the hierarchy; if <=1 all words end up in a subgraph
keywords_cnt = 10 #number of keywords that define a node
keyword_coeff = 100 # multiplier for the first keyword from each node (so that one node does not overshadow the others with its words)
#load graph
graph_main = igraph.Graph().Load(graph_file_name_pref+str(round(th*100))+".graphml")
print("Main graph summary: ", graph_main.summary())
hier_graph = igraph.Graph() # the resulting hierarchical graph
hier_graph.add_vertices(1)
hier_graph.vs["name"] = ["_Моногорода_"]
hier_graph.vs["keywords"] = ["_Моногорода_"]
hier_graph.vs["layer"] = ["Layer "+str(0)]
hier_graph.vs["layer_n"] = [0]
hier_graph.vs["child_node_cnt"] = [len(graph_main.vs)]
hier_graph.vs["parent_node"] = ["n"]
hier_graph.vs["graph"] = [graph_main]
layer = 1
parent_vrtx_name = "_Моногорода_"
mono_dict = addLayer(hier_graph, graph_main, parent_vrtx_name, layer,
th_start, th_step, th_max,
max_depth,
min_subgraph_coeff, keywords_cnt, keyword_coeff,
avg_bouquet
)
words = " ".join(mono_dict.keys())
hier_graph.vs["keywords"] = [words if i == 0 else hier_graph.vs["keywords"][i]
for i in range(0, len(hier_graph.vs))
]
hier_graph.vs["child_cnt"] = hier_graph.degree()
print("Hier graph summary: ", hier_graph.summary())
print("Graphs' samples: ", hier_graph.vs["parent_node"][0:15])
print("Graphs' in hierarchy: ", len(hier_graph.vs["graph"]))
print("Graphs' samples: ", hier_graph.vs["graph"][0:5])
print("One graph words sample: ", hier_graph.vs["graph"][0].vs["name"][0:10])
#Compute additional useful characteristics:
# - total word weight
# - min, max and mean edge weight
hier_graph.vs["subgraph_weigth"] = [getSubgraphWeight(sg) for sg in hier_graph.vs["graph"]]
sg_edge_stats = [getEdgeStat(sg) for sg in hier_graph.vs["graph"]]
hier_graph.vs["subgraph_edge_mins"] = [i[0] for i in sg_edge_stats]
hier_graph.vs["subgraph_edge_avgs"] = [i[1] for i in sg_edge_stats]
hier_graph.vs["subgraph_edge_maxs"] = [i[2] for i in sg_edge_stats]
hier_graph.write_graphml(hier_graph_file)
print("Graph file: "+hier_graph_file)
return hier_graph
#---------------------------------------------------------------------------
# Indicators fuzzy clustering
# always executed right after the previous step, because the hierarchical graph is not reloaded here
def doIndicatorsFuzzyClustering(hier_graph, max_inds_count, ind_file, clustering_results_file):
#load graph
#hier_graph = igraph.Graph().Load(hier_graph_file)
#load indicators
pkl_file = open(ind_file, "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
print("Inds len: ", len(inds))
graphs = hier_graph.vs["graph"]
all_words = graphs[0].vs["name"]
print("All words >> ", all_words[0:100])
for v in hier_graph.vs:
index = v.index
graph = graphs[index]
for i in inds:
i_len = sum( [1 for w in i[2] if w in all_words] ) #indicator length counted in words present in our graph
cnt = sum( [1 for w in i[2] if w in graph.vs["name"] ] )
if i_len > 0 and cnt > 0:
pcnt = cnt/i_len #len(i[2])
i[3][index] = round(pcnt,5)
print("Inds sample [0:10]: ", inds[0:9])
#write indicators
output = open(clustering_results_file+".pkl", "wb")
pickle.dump(inds, output)
output.close()
#write indicators to csv
df = pd.DataFrame(inds)
df.to_csv(clustering_results_file+".csv", index=False, header=True)
print("Indicators clustering results saved in ", clustering_results_file+".csv")
#---------------------------------------------------------------------------
# test fuzzy hierarchical clustering results
def getIndicatorsByNode(pNodeIndex: int, pInds: list) -> []:
ind_list=[]
for i in pInds:
pt = i[3].get(pNodeIndex, None)
if pt != None:
ind_list.append([i, pt])
ind_list.sort(key=lambda arg: arg[1], reverse=True)
return ind_list
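# Illustrative sketch (hypothetical record): each element of pInds has the form
# [ind_id, raw_text, lemmas, memberships], e.g. memberships = {596: 0.75, 600: 0.25};
# getIndicatorsByNode(596, pInds) then yields that indicator paired with 0.75,
# with the whole result sorted by membership in descending order.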
def testHierarchicalClusteringResults(hier_graph_file, clustering_results_file):
# load graph
hier_graph = igraph.Graph().Load(hier_graph_file)
#load indicators
pkl_file = open(clustering_results_file+".pkl", "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
for n in [596, 600, 601, 594]:
print("Node ID: ", n, " keywords: ", hier_graph.vs["keywords"][n])
p_inds = getIndicatorsByNode(n, inds)
print("Inds cnt: ", len(p_inds))
print("Inds sample [0:10]:")
for i in p_inds[0:10]:
print(i[1]," - ",i[0][0]," - ",i[0][1])
print("----------------------------------")
#---------------------------------------------------------------------------
#Add extra properties to the graph so that it is easier to work with later
# For a more meaningful analysis it helps to extend the graph with two more properties:
# - the number of indicators associated with a vertex
# - the total weight of the indicators associated with a vertex
def addGraphProperties(hier_graph_file, clustering_results_file):
# load graph
hier_graph = igraph.Graph().Load(hier_graph_file)
#load indicators
pkl_file = open(clustering_results_file+".pkl", "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
inds_cnt = []
ind_weigths = []
for v in hier_graph.vs():
p_inds = getIndicatorsByNode(v.index, inds)
inds_cnt.append(len(p_inds))
ind_weigths.append(sum(i[1] for i in p_inds))
hier_graph.vs["inds_cnt"] = inds_cnt
hier_graph.vs["inds_weigth"] = ind_weigths
hier_graph.write_graphml(hier_graph_file)
print("Graph file: ", hier_graph_file)
#---------------------------------------------------------------------------
# This is not a pipeline step either; it is a helper procedure that gives the graph a more readable layout.
# Modify the graph by turning it into a "sphere" (reattaching vertices directly to the root).
def deleteVertice(graph: igraph.Graph, vertex_index: int):
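    # Reattach every neighbour that lies farther from the root (vertex 0) than this vertex directly to
    # the root, then delete the vertex itself (assumes the graph is a tree rooted at vertex 0).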
for e in graph.incident(vertex_index):
for v in graph.es()[e].tuple:
if (v != vertex_index) and (v != 0):
path_len_1 = graph.shortest_paths(0, v)
path_len_2 = graph.shortest_paths(0, vertex_index)
                if path_len_2 < path_len_1:  # the graph is a tree, so this comparison is sufficient
graph.add_edge(0, v)
graph.delete_vertices(vertex_index)
def transformToSphereGraph(hier_graph_file, sphere_graph_file):
# load graph
hier_graph = igraph.Graph().Load(hier_graph_file)
for v in [90,67,66,65,64,63,62,61,60,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]:
deleteVertice(hier_graph, v)
# write down the graph
hier_graph.write_graphml(sphere_graph_file)
print("Graph file: ", sphere_graph_file)
#---------------------------------------------------------------------------
# Also not a pipeline step, but a separate task:
# given a set of graph vertices, keep only the vertices that share indicators with them
def getIndicatorRelatedNodesByNodeList(pNodeIndexList: list, pInds: list) -> list:
ind_list={0}
for i in pInds:
nodes = i[3].keys()
b = 0
for node_index in pNodeIndexList:
if node_index in nodes:
b += 1
if b > 0:
for n in nodes:
ind_list.add(n)
    for node_index in pNodeIndexList:
        # discard() instead of remove(): a query node with no shared indicators would otherwise raise KeyError
        ind_list.discard(node_index)
return sorted(list(ind_list), key=int, reverse=True)
def getRelatedIndicatorsByNode(nodes, sphere_graph_file, clustering_results_file,
sphere_graph_by_nodes_file):
# load graph
hier_graph = igraph.Graph().Load(sphere_graph_file)
#load indicators
pkl_file = open(clustering_results_file+".pkl", "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
related_nodes = getIndicatorRelatedNodesByNodeList([69], inds)
print("Related nodes: ", res)
#удалим остальные узлы из графа
node_ids = hier_graph.vs["id"]
for i in reversed(range(0,len(node_ids))):
i_id = int( node_ids[i][1:] )
if i_id not in related_nodes:
hier_graph.delete_vertices(i)
# write down the graph
hier_graph.write_graphml(sphere_graph_by_nodes_file)
print("Graph file: ", sphere_graph_by_nodes_file)
#---------------------------------------------------------------------------
# Step 5. Crisp clustering of the indicators (using crisp and fuzzy methods)
# Different experiments
def lognormal(x, mu, sigma):
return (1 / (sigma*x*math.sqrt(2*math.pi)) )* math.exp(-1/2*((math.log(x)-mu)/sigma)**2)
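# Illustrative note (not part of the original code): lognormal() above is the standard log-normal pdf,
# so with mu=2.7 and sigma=1 it peaks at x = exp(mu - sigma**2), i.e. around 5-6, and decays for very
# small or very large x. Used as a weight on a node's indicator count, it down-weights nodes with very
# few or very many indicators, as the comment inside clusterIndicators() below explains.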
#--------------------------------------------------------------------------
def clusterIndicators(hier_graph_file, clustering_results_file,
hdbscan_results_file, min_cluster_size):
# load graph
hier_graph = igraph.Graph().Load(hier_graph_file)
#load indicators
pkl_file = open(clustering_results_file+".pkl", "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
print("\n Vertex cont >>", len(hier_graph.vs))
#feature creation
features = []
    # Define the function that controls a node's influence: nodes with too few or too many indicators are treated as less significant
print( hier_graph.vs["inds_cnt"][0:10])
for i in inds:
features.append( [ ( lognormal(hier_graph.vs["inds_cnt"][j], 2.7, 1) * 1000 )
if i[3].get(j) is not None else 0
for j in range(0, len(hier_graph.vs))
if j not in [0]
]
)
    # HDBSCAN -- algorithm -----------------------------------------------------------
#clustering
clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size)
clusterer.fit_predict(features)
#clusters indicators
cl_inds = [ [inds[j][0], inds[j][1], inds[j][2] , clusterer.labels_[j]] for j in range(len(inds))]
# write indicators to csv
df = pd.DataFrame(cl_inds)
df.to_csv(hdbscan_results_file+".csv", index=False, header=True, encoding = "utf-8")
print("Indicators clustering results saved in ", hdbscan_results_file+".csv")
output = open(hdbscan_results_file+".pkl", "wb")
pickle.dump(cl_inds, output)
output.close()
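# Illustrative note (not part of the original code): in clusterIndicators() the feature matrix has one
# row per indicator and one column per graph vertex except vertex 0, i.e. roughly the shape
# (len(inds), len(hier_graph.vs) - 1); a cell is non-zero only when the indicator has a fuzzy membership
# in that vertex, and its value is then the lognormal weight of the vertex's indicator count, times 1000.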
#--------------------------------------------------------------------------
def clusterIndicatorsByW2V(p_ind_file, vectors_file, hdbscan_results_file, min_cluster_size):
"""
    :param p_ind_file: file with the indicators to be clustered
    :param vectors_file: file with the word2vec model
    :param hdbscan_results_file: file where the clustering results will be written
    :param min_cluster_size: minimum cluster size for the HDBSCAN algorithm
    :return: None, but the clustering results are written to hdbscan_results_file
"""
# load indicators
pkl_file = open(p_ind_file + ".pkl", "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
# print("\n Vertex cont >>", len(hier_graph.vs))
# feature creation
model = KeyedVectors.load_word2vec_format(vectors_file, binary=True)
features = []
    # print(hier_graph.vs["inds_cnt"][0:10])  # hier_graph is not loaded in this function
for i in inds:
# l_ind_vector = model.word_vec(i[2][0])
l_ind_vector_pos = model.word_vec(i[2][0])
for k in range(1, len(i[2])):
if i[2][k] in model.vocab:
# l_ind_vector = np.add(l_ind_vector, model.word_vec(i[2][k]))
                if np.dot(l_ind_vector_pos, model.word_vec(i[2][k])) > 0:  # dot product of the accumulated vector and the word vector
                    l_ind_vector_pos = np.add(l_ind_vector_pos, model.word_vec(i[2][k]))
                else:  # zero could be treated either way, which is why np.sign is not used here
                    l_ind_vector_pos = np.add(l_ind_vector_pos, -1.0 * model.word_vec(i[2][k]))
# features_w2v.append(l_ind_vector)
# features_w2v_pos.append(l_ind_vector_pos)
features.append(l_ind_vector_pos)
# features.append([(lognormal(hier_graph.vs["inds_cnt"][j], 2.7, 1) * 1000)
# if i[3].get(j) is not None else 0
# for j in range(0, len(hier_graph.vs))
# if j not in [0]
# ]
# )
    # HDBSCAN -- algorithm -----------------------------------------------------------
# clustering
clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, metric='pyfunc', func=cu.cosine_metric)
clusterer.fit_predict(features)
# clusters indicators
cl_inds = [[inds[j][0], inds[j][1], inds[j][2], clusterer.labels_[j]] for j in range(len(inds))]
# write indicators to csv
df = pd.DataFrame(cl_inds)
df.to_csv(hdbscan_results_file + ".csv", index=False, header=True, encoding="utf-8")
print("Indicators clustering results saved in ", hdbscan_results_file + ".csv")
output = open(hdbscan_results_file + ".pkl", "wb")
pickle.dump(cl_inds, output)
output.close()
#--------------------------------------------------------------------------
def classifyIndicators(hdbscan_results_file, vectors_file, hdbscan_results_classified_file,
noise_output_file=False):
pkl_file = open(hdbscan_results_file+".pkl", "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
    # keep every cluster label except the HDBSCAN noise label (-1); slicing off the last element of an unordered set is fragile
    n_clusters = [c for c in set(ind[3] for ind in inds) if c != -1]
clusters_data = [ [ [ind[0], ind[1], ind[2], ind[3], [] ] for ind in inds if ind[3] == cluster]
for cluster in n_clusters]
noise_data = [ [ind[0], ind[1], ind[2], ind[3], [] ] for ind in inds if ind[3] == -1]
classified_data = []
all_inds_lemmas = [ind[2] for cluster in clusters_data for ind in cluster] + [ind[2] for ind in noise_data]
idf = cu.getIdf(all_inds_lemmas)
model = KeyedVectors.load_word2vec_format(vectors_file, binary=True)
new_clusters = {}
for cluster in clusters_data:
sentence_vectors = []
ind_vec = {}
for ind in cluster:
if len(ind[2]) > 0:
lemm_vectors = [model[lemm] for lemm in ind[2] if lemm in model.vocab]
lemm_weights = [idf[lemm] for lemm in ind[2] if lemm in model.vocab]
if len(lemm_vectors) > 0:
sentence_vector = cu.makeAverageVector(lemm_vectors, lemm_weights)
sentence_vectors.append(sentence_vector)
ind_vec[ind[0]] = sentence_vector
cluster_vector = cu.makeAverageVector(sentence_vectors)
clean_cluster = []
clean_ind_vec = []
for ind in cluster:
if ind[0] in ind_vec:
if cosine_similarity([ind_vec[ind[0]]], [cluster_vector])[0] >= 0.8:
clean_cluster.append(ind)
clean_ind_vec.append(ind_vec[ind[0]])
else:
noise_data.append([ind[0], ind[1], ind[2], -1, []])
if len(clean_ind_vec) > 0:
clean_cluster_vector = cu.makeAverageVector(clean_ind_vec)
new_clusters[cluster[0][3]] = [clean_cluster_vector, clean_cluster]
new_n_clusters = list(new_clusters.keys())
prev_cluster = new_n_clusters[0]
for cluster in new_n_clusters[1:]:
sim = cosine_similarity([new_clusters[prev_cluster][0]], [new_clusters[cluster][0]])[0]
print(prev_cluster, cluster, sim)
if sim >= 0.85:
new_clusters[prev_cluster][1] += new_clusters[cluster][1]
new_clusters[prev_cluster][0] = cu.makeAverageVector([new_clusters[prev_cluster][0], new_clusters[cluster][0]])
del new_clusters[cluster]
else:
prev_cluster = cluster
for cl in new_clusters:
keywords = [word.split("_")[0] for word, freq in model.most_similar(positive = [new_clusters[cl][0]], topn=10)]
new_clusters[cl].append(keywords)
for ind in new_clusters[cl][1]:
if len(ind[2]) > 0:
ind[3] = cl
ind[4] = keywords
classified_data.append(ind)
else:
ind[3] = "-1"
classified_data.append(ind)
noise_left = []
for i, text, lemmas, n, kw in noise_data:
lemm_vectors = [model[lemm] for lemm in lemmas if lemm in model.vocab]
lemm_weights = [idf[lemm] for lemm in lemmas if lemm in model.vocab]
sentence_vector = cu.makeAverageVector(lemm_vectors, lemm_weights)
        for cl in new_clusters:
            if cosine_similarity([sentence_vector], [new_clusters[cl][0]])[0] >= 0.9:
                classified_data.append([i, text, lemmas, cl, new_clusters[cl][2]]) #+"+"
                break
        else:
            # no cluster was similar enough: the indicator stays in the noise set
            noise_left.append([i, text, lemmas, n, kw])
classified_data += noise_left
df = pd.DataFrame(classified_data)
df.to_csv(hdbscan_results_classified_file+".csv", index=False, header=True, encoding = "utf-8")
print("Indicators classification results saved in ", hdbscan_results_classified_file+".csv")
output = open(hdbscan_results_classified_file+".pkl", "wb")
pickle.dump(classified_data, output)
output.close()
if noise_output_file:
noise_data_output = [ [ind[0], ind[1], ind[2], {}] for ind in noise_left]
output = open(noise_output_file+".pkl", "wb")
pickle.dump(noise_data_output, output)
output.close()
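# Illustrative note (not part of the original code): classifyIndicators() relies on three cosine-similarity
# thresholds, presumably tuned by hand: 0.8 to keep an indicator inside its HDBSCAN cluster, 0.85 to merge
# neighbouring cluster centroids, and 0.9 to reassign leftover noise to a cleaned cluster.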
#--------------------------------------------------------------------------
def clusterNoise(m, noise_output_file, path, prefix, suffix, start_th, vectors_file, min_cluster_size):
suffix += "_noise"
words_freq_file = path+prefix+suffix+".csv"
words_ds_file = path+prefix+"words_ds_"+suffix+".pkl"
words_dict_file = path+prefix+"words_"+suffix+".pkl"
graph_file_name_pref = path+prefix+"words_graph_"+suffix+"_th_"
hier_graph_file = path+prefix+"words_hier_graph_"+suffix+"_th_"+str(round(start_th*100))+".graphml"
clustering_results_file = path+prefix+"hier_"+suffix+"_th_"+str(round(start_th*100))
hdbscan_results_file = path+prefix+suffix+"_th_"+str(round(start_th*100))+"_hdbscan"
hdbscan_results_classified_file = path+prefix+suffix+"_th_"+str(round(start_th*100))+"_hdbscan_classified"
pkl_file = open(noise_output_file+".pkl", "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
max_inds_count = len(inds)
createWordsDataset(m, noise_output_file+".pkl", vectors_file, words_freq_file, words_ds_file, words_dict_file)
constructGraph(start_th, words_ds_file, words_dict_file, graph_file_name_pref)
hier_graph = doGraphHierarchicalClustering(start_th, max_inds_count, graph_file_name_pref, hier_graph_file)
doIndicatorsFuzzyClustering(hier_graph, max_inds_count, noise_output_file+".pkl", clustering_results_file)
addGraphProperties(hier_graph_file, clustering_results_file)
clusterIndicators(hier_graph_file, clustering_results_file, hdbscan_results_file, min_cluster_size)
classifyIndicators(hdbscan_results_file, vectors_file, noise_output_file)
#--------------------------------------------------------------------------
def mergeClusteredNoise(hdbscan_results_classified_file, noise_output_file, merged_file):
pkl_file = open(hdbscan_results_classified_file+".pkl", "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
pkl_file = open(noise_output_file+".pkl", "rb")
inds_noise = pickle.load(pkl_file)
pkl_file.close()
    # keep every cluster label except the noise label (-1)
    n_clusters = [c for c in set(ind[3] for ind in inds) if c != -1]
last_cluster = max(n_clusters)+1
merged_data = [ ind for ind in inds for cluster in n_clusters if ind[3] == cluster]
    n_noise_clusters = [c for c in set(ind[3] for ind in inds_noise) if c != -1]
merged_data += [ [ind[0], ind[1], ind[2], ind[3]+last_cluster, ind[4]] for ind in inds_noise
for cluster in n_noise_clusters if ind[3] == cluster]
merged_data += [ ind for ind in inds_noise if ind[3] == -1]
df = pd.DataFrame(merged_data)
df.to_csv(merged_file+".csv", index=False, header=True, encoding = "utf-8")
output = open(merged_file+".pkl", "wb")
pickle.dump(merged_data, output)
output.close()
#--------------------------------------------------------------------------
def extractKeyterms(term_extractor, merged_file, keyterms_file):
pkl_file = open(merged_file+".pkl", "rb")
inds = pickle.load(pkl_file)
pkl_file.close()
    n_clusters = [c for c in set(ind[3] for ind in inds) if c != -1]
clusters_data = [ [ ind for ind in inds if ind[3] == cluster] for cluster in n_clusters]
noise_data = [ ind + [] for ind in inds if ind[3] == -1]
inds_with_keyterms = []
for cluster in clusters_data:
term_counter = collections.Counter()
for ind in cluster:
sentence = correctSpelling(ind[1].lower())
term_counter.update([term.normalized for term in term_extractor(sentence)])
keyterms = [term for term in term_counter if term_counter[term]]
for ind in cluster:
inds_with_keyterms.append([ind[0], ind[1], ind[2], ind[3], keyterms, ind[4]])
inds_with_keyterms += noise_data
df = pd.DataFrame(inds_with_keyterms)
df.to_csv(keyterms_file, index=False, header=True, encoding = "utf-8")
print("Cluster keyterms saved in ", keyterms_file)
#--------------------------------------------------------------------------
def main():
m = Mystem()
stemmer = SnowballStemmer("russian")
term_extractor = TermExtractor()
path = "med_inds_clustering/"
max_inds_count = 19035
prefix = "june_inds_"
suffix = "med"
start_th = 0.5 #start threshold for fuzzy graph
in_file = path+"june_inds_med_by_kw.xlsx"
stopwords_file = path+"Стоп слова Топонимы.xlsx"
vectors_file = path+"ruwikiruscorpora_0_300_20.bin"
#vectors_file = path+"wiki.ru.vec" #fasttext
tags_dict_file = path+"tags_dict.txt"
#ind_file = path+prefix+suffix+".pkl"
ind_file = path+"june_inds_med_by_kw.pkl"
words_freq_file = path+prefix+suffix+".csv"
words_ds_file = path+prefix+"words_ds_"+suffix+".pkl"
words_dict_file = path+prefix+"words_"+suffix+".pkl"
graph_file_name_pref = path+prefix+"words_graph_"+suffix+"_th_"
hier_graph_file = path+prefix+"words_hier_graph_"+suffix+"_th_"+str(round(start_th*100))+".graphml"
clustering_results_file = path+prefix+"hier_"+suffix+"_th_"+str(round(start_th*100))
#sphere_graph_file = path+prefix+"words_sphere_graph_"+suffix+"_th_"+str(round(start_th*100))+".graphml"
#sphere_graph_by_nodes_file = path+prefix+"words_sphere_graph_"+suffix+"_th_"+str(round(start_th*100))+"_69.graphml"
hdbscan_results_file = path+prefix+suffix+"_th_"+str(round(start_th*100))+"_hdbscan"
hdbscan_results_classified_file = path+prefix+suffix+"_th_"+str(round(start_th*100))+"_hdbscan_classified"
noise_output_file = path+prefix+suffix+"_th_"+str(round(start_th*100))+"_noise"
merged_file = path+prefix+suffix+"_th_"+str(round(start_th*100))+"_hdbscan_classified_merged"
keyterms_file = path+prefix+suffix+"_th_"+str(round(start_th*100))+"_hdbscan_classified_keyterms.csv"
print("--- Start time %s ---" % strftime("%a, %d %b %Y %H:%M:%S", gmtime()))
start_time = time.time()
"""
createIndicatorsDataset(m, stemmer, max_inds_count, stopwords_file, in_file, tags_dict_file, ind_file)
createWordsDataset(m, ind_file, vectors_file, words_freq_file, words_ds_file, words_dict_file)
constructGraph(start_th, words_ds_file, words_dict_file, graph_file_name_pref)
hier_graph = doGraphHierarchicalClustering(start_th, max_inds_count, graph_file_name_pref, hier_graph_file)
doIndicatorsFuzzyClustering(hier_graph, max_inds_count, ind_file, clustering_results_file)
#testHierarchicalClusteringResults(hier_graph_file, clustering_results_file)
addGraphProperties(hier_graph_file, clustering_results_file)
#transformToSphereGraph(hier_graph_file, sphere_graph_file)
#getRelatedIndicatorsByNode(nodes, sphere_graph_file, clustering_results_file, sphere_graph_by_nodes_file)
"""
clusterIndicators(hier_graph_file, clustering_results_file, hdbscan_results_file, min_cluster_size = 18)
classifyIndicators(hdbscan_results_file, vectors_file, hdbscan_results_classified_file, noise_output_file)
clusterNoise(m, noise_output_file, path, prefix, suffix, start_th, vectors_file, min_cluster_size = 8)
mergeClusteredNoise(hdbscan_results_classified_file, noise_output_file, merged_file)
extractKeyterms(term_extractor, merged_file, keyterms_file)
print("Done")
end_time = time.time()
print("--- %s seconds ---" % (end_time - start_time))
# --------------------------------------------------------------------------
def main_edu():
m = Mystem()
stemmer = SnowballStemmer("russian")
term_extractor = TermExtractor()
    # Configure the experiment parameters
l_exp_conf = exp_conf.getExperimentConfigByName('edu_gen')
path = l_exp_conf['path']
max_inds_count = l_exp_conf['max_inds_count']
prefix = l_exp_conf['prefix']
suffix = l_exp_conf['suffix']
start_th = l_exp_conf['start_th'] # start threshold for fuzzy graph
in_file = l_exp_conf['in_file']
    # Configure the common parameters
stopwords_file = "data/" + "Стоп слова Топонимы.xlsx"
vectors_file = "data/" + "ruwikiruscorpora_0_300_20.bin"
# vectors_file = path+"wiki.ru.vec" #fasttext
tags_dict_file = "data/" + "tags_dict.txt"
# ind_file = path+prefix+suffix+".pkl"
ind_file = path + prefix + "ds.pkl"
words_freq_file = path + prefix + "words_freq_"+ suffix + ".csv"
words_ds_file = path + prefix + "words_ds_" + suffix + ".pkl"
words_dict_file = path + prefix + "words_" + suffix + ".pkl"
graph_file_name_pref = path + prefix + "words_graph_" + suffix + "_th_"
hier_graph_file = path + prefix + "words_hier_graph_" + suffix + "_th_" + str(round(start_th * 100)) + ".graphml"
clustering_results_file = path + prefix + "hier_" + suffix + "_th_" + str(round(start_th * 100))
# sphere_graph_file = path+prefix+"words_sphere_graph_"+suffix+"_th_"+str(round(start_th*100))+".graphml"
# sphere_graph_by_nodes_file = path+prefix+"words_sphere_graph_"+suffix+"_th_"+str(round(start_th*100))+"_69.graphml"
hdbscan_results_file = path + prefix + suffix + "_th_" + str(round(start_th * 100)) + "_hdbscan"
hdbscan_results_classified_file = path + prefix + suffix + "_th_" + str(
round(start_th * 100)) + "_hdbscan_classified"
noise_output_file = path + prefix + suffix + "_th_" + str(round(start_th * 100)) + "_noise"
merged_file = path + prefix + suffix + "_th_" + str(round(start_th * 100)) + "_hdbscan_classified_merged"
keyterms_file = path + prefix + suffix + "_th_" + str(round(start_th * 100)) + "_hdbscan_classified_keyterms.csv"
print("--- Start time %s ---" % strftime("%a, %d %b %Y %H:%M:%S", gmtime()))
start_time = time.time()
# createIndicatorsDataset(m, stemmer, max_inds_count, stopwords_file, in_file, tags_dict_file, ind_file)
createWordsDataset(m, ind_file, vectors_file, words_freq_file, words_ds_file, words_dict_file)
constructGraph(start_th, words_ds_file, words_dict_file, graph_file_name_pref)
hier_graph = doGraphHierarchicalClustering(start_th, max_inds_count, graph_file_name_pref, hier_graph_file)
doIndicatorsFuzzyClustering(hier_graph, max_inds_count, ind_file, clustering_results_file)
#testHierarchicalClusteringResults(hier_graph_file, clustering_results_file)
addGraphProperties(hier_graph_file, clustering_results_file)
#transformToSphereGraph(hier_graph_file, sphere_graph_file)
#getRelatedIndicatorsByNode(nodes, sphere_graph_file, clustering_results_file, sphere_graph_by_nodes_file)
"""
clusterIndicators(hier_graph_file, clustering_results_file, hdbscan_results_file, min_cluster_size=18)
classifyIndicators(hdbscan_results_file, vectors_file, hdbscan_results_classified_file, noise_output_file)
clusterNoise(m, noise_output_file, path, prefix, suffix, start_th, vectors_file, min_cluster_size=8)
mergeClusteredNoise(hdbscan_results_classified_file, noise_output_file, merged_file)
extractKeyterms(term_extractor, merged_file, keyterms_file)
"""
print("Done")
end_time = time.time()
print("--- %s seconds ---" % (end_time - start_time))
if __name__ == "__main__":
main_edu()
| apache-2.0 |
vigilv/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
MetrodataTeam/incubator-airflow | airflow/hooks/dbapi_hook.py | 17 | 9454 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import numpy
import logging
import sys
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
return '{conn.conn_type}://{login}{host}/{conn.schema}'.format(
conn=conn, login=login, host=host)
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if sys.version_info[0] < 3:
s = s.encode('utf-8')
logging.info(s)
if parameters is not None:
cur.execute(s, parameters)
else:
cur.execute(s)
conn.commit()
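    # Hedged usage sketch (illustrative only; ``MyDbApiHook`` and ``my_conn_id`` are hypothetical
    # placeholders for a concrete subclass that sets ``conn_name_attr`` and ``connector``):
    #
    #     hook = MyDbApiHook(my_conn_id)
    #     hook.run(["CREATE TABLE t (a INT)", "INSERT INTO t VALUES (1)"])
    #     rows = hook.get_records("SELECT * FROM t")
    #     df = hook.get_pandas_df("SELECT * FROM t")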
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
l = []
for cell in row:
l.append(self._serialize_cell(cell, conn))
values = tuple(l)
placeholders = ["%s",]*len(values)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
logging.info(
"Loaded {i} into {table} rows so far".format(**locals()))
conn.commit()
logging.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
| apache-2.0 |
robertsj/ME701_examples | gui/mplcanvas.py | 1 | 1389 | #!/usr/bin/python
from __future__ import division
import sys
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
from PyQt5.QtWidgets import QApplication, QSizePolicy
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class MatplotlibCanvas(FigureCanvas) :
""" This is borrowed heavily from the matplotlib documentation;
specifically, see:
http://matplotlib.org/examples/user_interfaces/embedding_in_qt5.html
"""
def __init__(self, parent=None, width=5, height=4, dpi=100) :
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
self.axes.hold(False)
self.compute_initial_figure()
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
self.x = np.arange(0.0, 3.0, 0.01)
self.y = np.sin(2*np.pi*self.x)
self.axes.plot(self.x, self.y)
self.axes.set_xlabel('x')
self.axes.set_ylabel('y(x)')
def redraw(self, x, y) :
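        """Replace the current curve with y(x) and repaint the canvas."""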
self.axes.plot(x, y)
self.draw()
app = QApplication(sys.argv)
widget = MatplotlibCanvas()
widget.show()
app.exec_()
| mit |
LohithBlaze/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
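# Hedged usage sketch (illustrative only): a typical ``partial_fit`` implementation would do something like
#
#     if _check_partial_fit_first_call(self, classes):
#         # first call: ``self.classes_`` has just been set, so allocate per-class state here
#         ...
#
# while later calls may pass ``classes=None`` (or the same classes), in which case this returns False.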
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
| bsd-3-clause |
Srisai85/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
lanbing510/DensityPeakCluster | plot.py | 1 | 2642 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import numpy as np
from cluster import *
from sklearn import manifold
from plot_utils import *
def plot_rho_delta(rho, delta):
'''
Plot scatter diagram for rho-delta points
Args:
rho : rho list
delta : delta list
'''
logger.info("PLOT: rho-delta plot")
plot_scatter_diagram(0, rho[1:], delta[1:], x_label='rho', y_label='delta', title='Decision Graph')
plt.savefig('Decision Graph.jpg')
def plot_cluster(cluster):
'''
Plot scatter diagram for final points that using multi-dimensional scaling for data
Args:
cluster : DensityPeakCluster object
'''
logger.info("PLOT: cluster result, start multi-dimensional scaling")
dp = np.zeros((cluster.max_id, cluster.max_id), dtype = np.float32)
cls = []
for i in xrange(1, cluster.max_id):
for j in xrange(i + 1, cluster.max_id + 1):
dp[i - 1, j - 1] = cluster.distances[(i, j)]
dp[j - 1, i - 1] = cluster.distances[(i, j)]
cls.append(cluster.cluster[i])
cls.append(cluster.cluster[cluster.max_id])
cls = np.array(cls, dtype = np.float32)
fo = open(r'./tmp.txt', 'w')
fo.write('\n'.join(map(str, cls)))
fo.close()
#seed = np.random.RandomState(seed=3)
mds = manifold.MDS(max_iter=200, eps=1e-4, n_init=1,dissimilarity='precomputed')
dp_mds = mds.fit_transform(dp.astype(np.float64))
logger.info("PLOT: end mds, start plot")
plot_scatter_diagram(1, dp_mds[:, 0], dp_mds[:, 1], title='2D Nonclassical Multidimensional Scaling', style_list = cls)
plt.savefig("2D Nonclassical Multidimensional Scaling.jpg")
def plot_rhodelta_rho(rho, delta):
'''
Plot scatter diagram for rho*delta_rho points
Args:
rho : rho list
delta : delta list
'''
logger.info("PLOT: rho*delta_rho plot")
y=rho*delta
r_index=np.argsort(-y)
x=np.zeros(y.shape[0])
idx=0
for r in r_index:
x[r]=idx
idx+=1
plt.figure(2)
plt.clf()
plt.scatter(x,y)
plt.xlabel('sorted rho')
plt.ylabel('rho*delta')
plt.title("Decision Graph RhoDelta-Rho")
plt.show()
plt.savefig('Decision Graph RhoDelta-Rho.jpg')
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
dpcluster = DensityPeakCluster()
# dpcluster.local_density(load_paperdata, './example_distances.dat')
# plot_rho_delta(rho, delta) #plot to choose the threthold
rho, delta, nneigh = dpcluster.cluster(load_paperdata, './data/data_in_paper/example_distances.dat', 20, 0.1)
logger.info(str(len(dpcluster.ccenter)) + ' center as below')
for idx, center in dpcluster.ccenter.items():
logger.info('%d %f %f' %(idx, rho[center], delta[center]))
plot_cluster(dpcluster) | mit |
Newman101/scipy | scipy/signal/wavelets.py | 67 | 10523 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The 2p filter coefficients of the low-pass filter.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of the low-pass filter.
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
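# Hedged usage sketch (illustrative only): building the Daubechies-2 scaling function and wavelet on a
# dyadic grid with the functions above would look like
#
#     x, phi, psi = cascade(daub(2), J=7)
#
# where ``phi`` is the scaling function sampled at the points ``x`` and ``psi`` the corresponding wavelet.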
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-x^2/2 a^2)``,
where ``A = 2/sqrt(3a)pi^1/4``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, wavelet(length,
width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
| bsd-3-clause |
adrn/gary | gala/potential/potential/core.py | 2 | 29742 | # Standard library
import abc
import copy as pycopy
from collections import OrderedDict
import warnings
import uuid
# Third-party
import numpy as np
from astropy.constants import G
import astropy.units as u
from astropy.utils import isiterable
try:
from scipy.spatial.transform import Rotation
except ImportError:
raise ImportError("Gala requires scipy>=1.2: make sure you have updated "
"your version of scipy and try importing gala again.")
# Project
from ..common import CommonBase
from ...dynamics import PhaseSpacePosition
from ...util import ImmutableDict, atleast_2d
from ...units import DimensionlessUnitSystem
__all__ = ["PotentialBase", "CompositePotential"]
class PotentialBase(CommonBase, metaclass=abc.ABCMeta):
"""
    A base class for defining pure-Python gravitational potentials.
Subclasses must define (at minimum) a method that evaluates
the potential energy at a given position ``q``
and time ``t``: ``_energy(q, t)``. For integration, the subclasses
must also define a method to evaluate the gradient,
``_gradient(q,t)``. Optionally, they may also define methods
to compute the density and hessian: ``_density()``, ``_hessian()``.
"""
def __init__(self, parameters, origin=None, R=None,
ndim=3, units=None):
self._units = self._validate_units(units)
self.parameters = self._prepare_parameters(parameters, self.units)
try:
self.G = G.decompose(self.units).value
except u.UnitConversionError:
self.G = 1. # TODO: this is a HACK and could lead to user confusion
self.ndim = ndim
if origin is None:
origin = np.zeros(self.ndim)
self.origin = self._remove_units(origin)
if R is not None and ndim not in [2, 3]:
raise NotImplementedError('Gala potentials currently only support '
'rotations when ndim=2 or ndim=3.')
if R is not None:
if isinstance(R, Rotation):
R = R.as_matrix()
R = np.array(R)
if R.shape != (ndim, ndim):
raise ValueError('Rotation matrix passed to potential {0} has '
'an invalid shape: expected {1}, got {2}'
.format(self.__class__.__name__,
(ndim, ndim), R.shape))
self.R = R
def to_latex(self):
return ""
# ========================================================================
# Abstract methods that must be implemented by subclasses
#
@abc.abstractmethod
def _energy(self, q, t=0.):
pass
@abc.abstractmethod
def _gradient(self, q, t=0.):
pass
def _density(self, q, t=0.):
raise NotImplementedError("This Potential has no implemented density function.")
def _hessian(self, q, t=0.):
raise NotImplementedError("This Potential has no implemented Hessian.")
# ========================================================================
# Utility methods
#
def _remove_units(self, x):
"""
Always returns an array. If a Quantity is passed in, it converts to the
units associated with this object and returns the value.
"""
if hasattr(x, 'unit'):
x = x.decompose(self.units).value
else:
x = np.array(x)
return x
def _remove_units_prepare_shape(self, x):
"""
This is similar to that implemented by `gala.potential.common.CommonBase`,
but returns just the position if the input is a `PhaseSpacePosition`.
"""
if hasattr(x, 'unit'):
x = x.decompose(self.units).value
elif isinstance(x, PhaseSpacePosition):
x = x.cartesian.xyz.decompose(self.units).value
x = atleast_2d(x, insert_axis=1).astype(np.float64)
return x
# ========================================================================
# Core methods that use the above implemented functions
#
def energy(self, q, t=0.):
"""
Compute the potential energy at the given position(s).
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
The position to compute the value of the potential. If the
input position object has no units (i.e. is an `~numpy.ndarray`),
it is assumed to be in the same unit system as the potential.
Returns
-------
E : `~astropy.units.Quantity`
The potential energy per unit mass or value of the potential.
"""
q = self._remove_units_prepare_shape(q)
orig_shape, q = self._get_c_valid_arr(q)
t = self._validate_prepare_time(t, q)
ret_unit = self.units['energy'] / self.units['mass']
return self._energy(q, t=t).T.reshape(orig_shape[1:]) * ret_unit
def gradient(self, q, t=0.):
"""
Compute the gradient of the potential at the given position(s).
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
The position to compute the value of the potential. If the
input position object has no units (i.e. is an `~numpy.ndarray`),
it is assumed to be in the same unit system as the potential.
Returns
-------
grad : `~astropy.units.Quantity`
The gradient of the potential. Will have the same shape as
the input position.
"""
q = self._remove_units_prepare_shape(q)
orig_shape, q = self._get_c_valid_arr(q)
t = self._validate_prepare_time(t, q)
ret_unit = self.units['length'] / self.units['time']**2
return (self._gradient(q, t=t).T.reshape(orig_shape) * ret_unit).to(self.units['acceleration'])
def density(self, q, t=0.):
"""
Compute the density value at the given position(s).
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
The position to compute the value of the potential. If the
input position object has no units (i.e. is an `~numpy.ndarray`),
it is assumed to be in the same unit system as the potential.
Returns
-------
dens : `~astropy.units.Quantity`
            The mass density at the given position(s). If the input
            position has shape ``q.shape``, the output density will have
            shape ``q.shape[1:]``.
"""
q = self._remove_units_prepare_shape(q)
orig_shape, q = self._get_c_valid_arr(q)
t = self._validate_prepare_time(t, q)
ret_unit = self.units['mass'] / self.units['length']**3
return (self._density(q, t=t).T * ret_unit).to(self.units['mass density'])
def hessian(self, q, t=0.):
"""
Compute the Hessian of the potential at the given position(s).
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
The position to compute the value of the potential. If the
input position object has no units (i.e. is an `~numpy.ndarray`),
it is assumed to be in the same unit system as the potential.
Returns
-------
hess : `~astropy.units.Quantity`
The Hessian matrix of second derivatives of the potential. If the input
            position has shape ``q.shape``, the output will have shape
``(q.shape[0],q.shape[0]) + q.shape[1:]``. That is, an ``n_dim`` by
``n_dim`` array (matrix) for each position.
"""
if (self.R is not None and
not np.allclose(np.diag(self.R), 1., atol=1e-15, rtol=0)):
raise NotImplementedError("Computing Hessian matrices for rotated "
"potentials is currently not supported.")
q = self._remove_units_prepare_shape(q)
orig_shape,q = self._get_c_valid_arr(q)
t = self._validate_prepare_time(t, q)
ret_unit = 1 / self.units['time']**2
hess = np.moveaxis(self._hessian(q, t=t), 0, -1)
return hess.reshape((orig_shape[0], orig_shape[0]) + orig_shape[1:]) * ret_unit
# ========================================================================
    # Convenience methods that make use of the base methods
#
def acceleration(self, q, t=0.):
"""
Compute the acceleration due to the potential at the given position(s).
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
Position to compute the acceleration at.
Returns
-------
acc : `~astropy.units.Quantity`
The acceleration. Will have the same shape as the input
position array, ``q``.
"""
return -self.gradient(q, t=t)
def mass_enclosed(self, q, t=0.):
"""
Estimate the mass enclosed within the given position by assuming the potential
is spherical.
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
            Position(s) at which to estimate the enclosed mass.
Returns
-------
menc : `~astropy.units.Quantity`
Mass enclosed at the given position(s). If the input position
            has shape ``q.shape``, the output mass will have shape
``q.shape[1:]``.
"""
q = self._remove_units_prepare_shape(q)
orig_shape, q = self._get_c_valid_arr(q)
t = self._validate_prepare_time(t, q)
# small step-size in direction of q
h = 1E-3 # MAGIC NUMBER
# Radius
r = np.sqrt(np.sum(q**2, axis=1))
epsilon = h*q/r[:, np.newaxis]
dPhi_dr_plus = self._energy(q + epsilon, t=t)
dPhi_dr_minus = self._energy(q - epsilon, t=t)
diff = (dPhi_dr_plus - dPhi_dr_minus)
if isinstance(self.units, DimensionlessUnitSystem):
Gee = 1.
else:
Gee = G.decompose(self.units).value
Menc = np.abs(r*r * diff / Gee / (2.*h))
Menc = Menc.reshape(orig_shape[1:])
sgn = 1.
if 'm' in self.parameters and self.parameters['m'] < 0:
sgn = -1.
return sgn * Menc * self.units['mass']
def circular_velocity(self, q, t=0.):
"""
Estimate the circular velocity at the given position assuming the
potential is spherical.
Parameters
----------
q : array_like, numeric
Position(s) to estimate the circular velocity.
Returns
-------
vcirc : `~astropy.units.Quantity`
Circular velocity at the given position(s). If the input position
            has shape ``q.shape``, the output velocity will have shape
``q.shape[1:]``.
"""
q = self._remove_units_prepare_shape(q)
# Radius
r = np.sqrt(np.sum(q**2, axis=0)) * self.units['length']
dPhi_dxyz = self.gradient(q, t=t)
dPhi_dr = np.sum(dPhi_dxyz * q/r.value, axis=0)
return self.units.decompose(np.sqrt(r * np.abs(dPhi_dr)))
# ========================================================================
# Python special methods
#
def __call__(self, q):
return self.energy(q)
def __repr__(self):
pars = ""
if not isinstance(self.parameters, OrderedDict):
keys = sorted(self.parameters.keys()) # to ensure the order is always the same
else:
keys = self.parameters.keys()
for k in keys:
v = self.parameters[k].value
par_fmt = "{}"
post = ""
if hasattr(v, 'unit'):
post = " {}".format(v.unit)
v = v.value
if isinstance(v, float):
if v == 0:
par_fmt = "{:.0f}"
elif np.log10(v) < -2 or np.log10(v) > 5:
par_fmt = "{:.2e}"
else:
par_fmt = "{:.2f}"
elif isinstance(v, int) and np.log10(v) > 5:
par_fmt = "{:.2e}"
pars += ("{}=" + par_fmt + post).format(k, v) + ", "
if isinstance(self.units, DimensionlessUnitSystem):
return "<{}: {} (dimensionless)>".format(self.__class__.__name__, pars.rstrip(", "))
else:
return "<{}: {} ({})>".format(self.__class__.__name__, pars.rstrip(", "), ",".join(map(str, self.units._core_units)))
def __str__(self):
return self.__class__.__name__
def __add__(self, other):
if not isinstance(other, PotentialBase):
raise TypeError('Cannot add a {} to a {}'
.format(self.__class__.__name__,
other.__class__.__name__))
new_pot = CompositePotential()
if isinstance(self, CompositePotential):
for k, v in self.items():
new_pot[k] = v
else:
k = str(uuid.uuid4())
new_pot[k] = self
if isinstance(other, CompositePotential):
            for k, v in other.items():
if k in new_pot:
raise KeyError('Potential component "{}" already exists --'
'duplicate key provided in potential '
'addition')
new_pot[k] = v
else:
k = str(uuid.uuid4())
new_pot[k] = other
return new_pot
# ========================================================================
# Convenience methods that do fancy things
#
def plot_contours(self, grid, filled=True, ax=None, labels=None,
subplots_kw=dict(), **kwargs):
"""
Plot equipotentials contours. Computes the potential energy on a grid
(specified by the array `grid`).
.. warning:: Right now the grid input must be arrays and must already
be in the unit system of the potential. Quantity support is coming...
Parameters
----------
grid : tuple
Coordinate grids or slice value for each dimension. Should be a
tuple of 1D arrays or numbers.
filled : bool (optional)
Use :func:`~matplotlib.pyplot.contourf` instead of
:func:`~matplotlib.pyplot.contour`. Default is ``True``.
ax : matplotlib.Axes (optional)
labels : iterable (optional)
List of axis labels.
subplots_kw : dict
kwargs passed to matplotlib's subplots() function if an axes object
is not specified.
kwargs : dict
kwargs passed to either contourf() or plot().
Returns
-------
fig : `~matplotlib.Figure`
"""
import matplotlib.pyplot as plt
from matplotlib import cm
# figure out which elements are iterable, which are numeric
_grids = []
_slices = []
for ii, g in enumerate(grid):
if isiterable(g):
_grids.append((ii, g))
else:
_slices.append((ii, g))
# figure out the dimensionality
ndim = len(_grids)
# if ndim > 2, don't know how to handle this!
if ndim > 2:
raise ValueError("ndim > 2: you can only make contours on a 2D grid. For other "
"dimensions, you have to specify values to slice.")
if ax is None:
# default figsize
fig, ax = plt.subplots(1, 1, **subplots_kw)
else:
fig = ax.figure
if ndim == 1:
# 1D curve
x1 = _grids[0][1]
r = np.zeros((len(_grids) + len(_slices), len(x1)))
r[_grids[0][0]] = x1
for ii, slc in _slices:
r[ii] = slc
Z = self.energy(r*self.units['length']).value
ax.plot(x1, Z, **kwargs)
if labels is not None:
ax.set_xlabel(labels[0])
ax.set_ylabel("potential")
else:
# 2D contours
x1, x2 = np.meshgrid(_grids[0][1], _grids[1][1])
shp = x1.shape
x1, x2 = x1.ravel(), x2.ravel()
r = np.zeros((len(_grids) + len(_slices), len(x1)))
r[_grids[0][0]] = x1
r[_grids[1][0]] = x2
for ii, slc in _slices:
r[ii] = slc
Z = self.energy(r*self.units['length']).value
# make default colormap not suck
cmap = kwargs.pop('cmap', cm.Blues)
if filled:
cs = ax.contourf(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp),
cmap=cmap, **kwargs)
else:
cs = ax.contour(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp),
cmap=cmap, **kwargs)
if labels is not None:
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
return fig
def plot_density_contours(self, grid, filled=True, ax=None, labels=None,
subplots_kw=dict(), **kwargs):
"""
Plot density contours. Computes the density on a grid
(specified by the array `grid`).
.. warning:: Right now the grid input must be arrays and must already be in
the unit system of the potential. Quantity support is coming...
Parameters
----------
grid : tuple
Coordinate grids or slice value for each dimension. Should be a
tuple of 1D arrays or numbers.
filled : bool (optional)
Use :func:`~matplotlib.pyplot.contourf` instead of
:func:`~matplotlib.pyplot.contour`. Default is ``True``.
ax : matplotlib.Axes (optional)
labels : iterable (optional)
List of axis labels.
subplots_kw : dict
kwargs passed to matplotlib's subplots() function if an axes object
is not specified.
kwargs : dict
kwargs passed to either contourf() or plot().
Returns
-------
fig : `~matplotlib.Figure`
"""
import matplotlib.pyplot as plt
from matplotlib import cm
# figure out which elements are iterable, which are numeric
_grids = []
_slices = []
for ii, g in enumerate(grid):
if isiterable(g):
_grids.append((ii, g))
else:
_slices.append((ii, g))
# figure out the dimensionality
ndim = len(_grids)
# if ndim > 2, don't know how to handle this!
if ndim > 2:
raise ValueError("ndim > 2: you can only make contours on a 2D grid. For other "
"dimensions, you have to specify values to slice.")
if ax is None:
# default figsize
fig, ax = plt.subplots(1, 1, **subplots_kw)
else:
fig = ax.figure
if ndim == 1:
# 1D curve
x1 = _grids[0][1]
r = np.zeros((len(_grids) + len(_slices), len(x1)))
r[_grids[0][0]] = x1
for ii, slc in _slices:
r[ii] = slc
Z = self.density(r*self.units['length']).value
ax.plot(x1, Z, **kwargs)
if labels is not None:
ax.set_xlabel(labels[0])
                ax.set_ylabel("density")
else:
# 2D contours
x1, x2 = np.meshgrid(_grids[0][1], _grids[1][1])
shp = x1.shape
x1, x2 = x1.ravel(), x2.ravel()
r = np.zeros((len(_grids) + len(_slices), len(x1)))
r[_grids[0][0]] = x1
r[_grids[1][0]] = x2
for ii, slc in _slices:
r[ii] = slc
Z = self.density(r*self.units['length']).value
# make default colormap not suck
cmap = kwargs.pop('cmap', cm.Blues)
if filled:
cs = ax.contourf(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp),
cmap=cmap, **kwargs)
else:
cs = ax.contour(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp),
cmap=cmap, **kwargs)
# cs.cmap.set_under('w')
# cs.cmap.set_over('k')
if labels is not None:
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
return fig
def integrate_orbit(self, *args, **kwargs):
"""
.. warning:: This is now deprecated. Convenient orbit integration should
happen using the `gala.potential.Hamiltonian` class. With a
static reference frame, you just need to pass your potential
in to the `~gala.potential.Hamiltonian` constructor.
Integrate an orbit in the current potential using the integrator class
provided. Uses same time specification as `Integrator.run()` -- see
the documentation for `gala.integrate` for more information.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`, array_like
Initial conditions.
Integrator : `~gala.integrate.Integrator` (optional)
Integrator class to use.
Integrator_kwargs : dict (optional)
            Any extra keyword arguments to pass to the integrator class
when initializing. Only works in non-Cython mode.
cython_if_possible : bool (optional)
If there is a Cython version of the integrator implemented,
and the potential object has a C instance, using Cython
will be *much* faster.
**time_spec
Specification of how long to integrate. See documentation
for `~gala.integrate.parse_time_specification`.
Returns
-------
orbit : `~gala.dynamics.Orbit`
"""
warnings.warn("Use `Hamiltonian.integrate_orbit()` instead. If you are using a "
"static reference frame, you just need to pass your "
"potential object in to the Hamiltonian constructor to use, e.g., "
"orbit = Hamiltonian(potential).integrate_orbit(...).",
DeprecationWarning)
from ..hamiltonian import Hamiltonian
return Hamiltonian(self).integrate_orbit(*args, **kwargs)
def total_energy(self, x, v):
"""
Compute the total energy (per unit mass) of a point in phase-space
in this potential. Assumes the last axis of the input position /
velocity is the dimension axis, e.g., for 100 points in 3-space,
the arrays should have shape (100,3).
Parameters
----------
x : array_like, numeric
Position.
v : array_like, numeric
Velocity.
"""
warnings.warn("Use the energy methods on Orbit objects instead. In a future "
"release this will be removed.", DeprecationWarning)
v = atleast_2d(v, insert_axis=1)
return self.energy(x) + 0.5*np.sum(v**2, axis=0)
def save(self, f):
"""
Save the potential to a text file. See :func:`~gala.potential.save`
for more information.
Parameters
----------
f : str, file_like
A filename or file-like object to write the input potential object to.
"""
from .io import save
save(self, f)
@property
def units(self):
return self._units
def replace_units(self, units, copy=True):
"""Change the unit system of this potential.
Parameters
----------
units : `~gala.units.UnitSystem`
            Set of non-reducible units that specify (at minimum) the
length, mass, time, and angle units.
copy : bool (optional)
If True, returns a copy, if False, changes this object.
"""
if copy:
pot = pycopy.deepcopy(self)
else:
pot = self
PotentialBase.__init__(pot,
parameters=self.parameters,
origin=self.origin,
R=self.R,
ndim=self.ndim,
units=units)
return pot
# ========================================================================
# Deprecated methods
#
def _value(self, q, t=0.):
warnings.warn("Use `_energy()` instead.", DeprecationWarning)
return self._energy(q, t=t)
def value(self, *args, **kwargs):
__doc__ = self.energy.__doc__
warnings.warn("Use `energy()` instead.", DeprecationWarning)
return self.energy(*args, **kwargs)
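# Illustrative sketch (not part of gala): a minimal pure-Python subclass that
# supplies the two required methods, ``_energy`` and ``_gradient``, for an
# isotropic harmonic oscillator Phi = 0.5 * omega**2 * r**2. The class and the
# parameter name ``omega`` are assumptions added for demonstration only; they
# follow the convention documented for gala's pure-Python potentials, where
# the internal methods receive positions with shape (n_points, ndim).
class _SketchHarmonicOscillatorPotential(PotentialBase):
    def __init__(self, omega, units=None, origin=None, R=None):
        parameters = OrderedDict(omega=omega)
        super().__init__(parameters=parameters, units=units,
                         origin=origin, R=R)
    def _energy(self, q, t=0.):
        om = self.parameters['omega'].value
        return 0.5 * om**2 * np.sum(q**2, axis=1)
    def _gradient(self, q, t=0.):
        om = self.parameters['omega'].value
        return om**2 * q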
class CompositePotential(PotentialBase, OrderedDict):
"""
A potential composed of several distinct components. For example,
two point masses or a galactic disk and halo, each with their own
potential model.
A `CompositePotential` is created like a Python dictionary, e.g.::
>>> p1 = SomePotential(func1) # doctest: +SKIP
>>> p2 = SomePotential(func2) # doctest: +SKIP
>>> cp = CompositePotential(component1=p1, component2=p2) # doctest: +SKIP
This object actually acts like an `OrderedDict`, so if you want to
preserve the order of the potential components, use::
>>> cp = CompositePotential() # doctest: +SKIP
>>> cp['component1'] = p1 # doctest: +SKIP
>>> cp['component2'] = p2 # doctest: +SKIP
You can also use any of the built-in `Potential` classes as
components::
>>> from gala.potential import HernquistPotential
>>> cp = CompositePotential()
>>> cp['spheroid'] = HernquistPotential(m=1E11, c=10., units=(u.kpc,u.Myr,u.Msun,u.radian))
"""
def __init__(self, *args, **kwargs):
self._units = None
self.ndim = None
if len(args) > 0 and isinstance(args[0], list):
for k, v in args[0]:
kwargs[k] = v
else:
            for i, v in enumerate(args):
kwargs[str(i)] = v
self.lock = False
for v in kwargs.values():
self._check_component(v)
OrderedDict.__init__(self, **kwargs)
self.R = None # TODO: this is a little messy
def __setitem__(self, key, value):
self._check_component(value)
super(CompositePotential, self).__setitem__(key, value)
def _check_component(self, p):
if not isinstance(p, PotentialBase):
raise TypeError("Potential components may only be Potential "
"objects, not {0}.".format(type(p)))
if self.units is None:
self._units = p.units
self.ndim = p.ndim
else:
if (sorted([str(x) for x in self.units]) !=
sorted([str(x) for x in p.units])):
raise ValueError("Unit system of new potential component must "
"match unit systems of other potential "
"components.")
if p.ndim != self.ndim:
raise ValueError("All potential components must have the same "
"number of phase-space dimensions ({} in this "
"case)".format(self.ndim))
if self.lock:
raise ValueError("Potential object is locked - new components can "
"only be added to unlocked potentials.")
@property
def parameters(self):
params = dict()
for k, v in self.items():
params[k] = v.parameters
return ImmutableDict(**params)
def replace_units(self, units):
"""Change the unit system of this potential.
Parameters
----------
units : `~gala.units.UnitSystem`
            Set of non-reducible units that specify (at minimum) the
length, mass, time, and angle units.
"""
_lock = self.lock
pots = self.__class__()
pots._units = None
pots.lock = False
for k, v in self.items():
pots[k] = v.replace_units(units)
pots.lock = _lock
return pots
def _energy(self, q, t=0.):
return np.sum([p._energy(q, t) for p in self.values()], axis=0)
def _gradient(self, q, t=0.):
return np.sum([p._gradient(q, t) for p in self.values()], axis=0)
def _hessian(self, w, t=0.):
return np.sum([p._hessian(w, t) for p in self.values()], axis=0)
def _density(self, q, t=0.):
return np.sum([p._density(q, t) for p in self.values()], axis=0)
def __repr__(self):
return "<CompositePotential {}>".format(",".join(self.keys()))
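# Illustrative sketch (not part of gala): components can also be combined with
# the ``+`` operator defined on PotentialBase, which returns a
# CompositePotential keyed by generated uuid strings -- equivalent to the
# dict-style construction shown in the class docstring. This reuses the
# illustrative _SketchHarmonicOscillatorPotential defined above; the helper
# name and the dimensionless setup are assumptions for demonstration.
def _sketch_composite_demo():
    p1 = _SketchHarmonicOscillatorPotential(omega=1.)
    p2 = _SketchHarmonicOscillatorPotential(omega=2.)
    total = p1 + p2  # CompositePotential with two uuid-keyed components
    # the composite energy is the sum of the component energies
    q = np.array([1., 0., 0.])
    return total.energy(q), p1.energy(q) + p2.energy(q)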
_potential_docstring = """units : `~gala.units.UnitSystem` (optional)
        Set of non-reducible units that specify (at minimum) the
length, mass, time, and angle units.
origin : `~astropy.units.Quantity` (optional)
The origin of the potential, the default being 0.
R : `~scipy.spatial.transform.Rotation`, array_like (optional)
A Scipy ``Rotation`` object or an array representing a rotation matrix
that specifies a rotation of the potential. This is applied *after* the
origin shift. Default is the identity matrix.
"""
| mit |
mayblue9/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 68 | 43439 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
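# Illustrative note (not part of the original test suite): the `asgd` helper in
# CommonTest keeps the running average with the recursion
#     avg_{i+1} = (i * avg_i + w_{i+1}) / (i + 1)
# which is an incremental form of the arithmetic mean of all iterates seen so
# far. The helper name below is an assumption added for demonstration.
def _running_mean_sketch(values):
    avg = 0.0
    for i, v in enumerate(values):
        avg = (i * avg + v) / (i + 1.0)
    return avg  # equals sum(values) / len(values) up to float rounding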
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
            clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground-truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
    # Non-regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
gully/PyKE | pyke/keptimefix.py | 2 | 5648 | from .utils import PyKEArgumentHelpFormatter
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits as pyfits
from . import kepio, kepmsg
__all__ = ['keptimefix']
def keptimefix(infile, outfile=None, overwrite=False, verbose=False,
logfile='keptimefix.log'):
"""
keptimefix -- Correct a time stamp error in the target pixel files
    All Kepler light curve and target pixel files with version numbers below
    5.0 contain an error in the time stamps. This was fixed in the light curve
    files with version 5.0 (at MAST after May 2013). The timescale for fixing
    the target pixel files is unclear, but in the meantime this script will fix
    the target pixel file time stamps and make the times consistent with the
    light curve files. The error in Q0-13 can be corrected by adding 66.184 s.
    A leap second was added during Q14, so Q15 and later can be corrected by
    adding 67.184 s. This tool applies the appropriate correction.
Parameters
----------
infile : str
The name of a Kepler target pixel file obtained from the MAST.
outfile : str
        The name of the output FITS target pixel file. outfile will be a
        direct copy of infile but with the TIME column updated to be correct
        and consistent with the Kepler light curve files.
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
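    Examples
    --------
    A minimal usage sketch; the file name here is hypothetical::
        >>> from pyke.keptimefix import keptimefix
        >>> keptimefix('kplr_tpf_example.fits', overwrite=True,
        ...            verbose=True)  # doctest: +SKIP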
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPTIMEFIX -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPTIMEFIX started at', logfile, verbose)
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = 'ERROR -- KEPTIMEFIX: {} exists. Use --overwrite'.format(outfile)
kepmsg.err(logfile, errmsg, verbose)
instr = pyfits.open(infile, 'readonly')
creator = instr[0].header['CREATOR']
if creator.find('TargetPixelExporterPipelineModule') < 0:
errmsg = 'ERROR -- KEPTIMEFIX: this file is not a target pixel file'
kepmsg.err(logfile, errmsg, verbose)
header_ext1 = instr[1].header.cards
data_ext1 = instr[1].data
fileversion = instr[0].header['FILEVER']
if float(fileversion) > 4.0:
errmsg = ('ERROR -- KEPTIMEFIX: no time fix needed for this file.'
' FILEVER > 4.0')
kepmsg.err(logfile, errmsg, verbose)
quarter = instr[0].header['QUARTER']
if instr[0].header['OBSMODE'] == 'long cadence':
cadencetype = 'L'
elif instr[0].header['OBSMODE'] == 'short cadence':
cadencetype = 'S'
TIME_wrong = data_ext1.field('TIME')
CADNUM = data_ext1.field('CADENCENO')
TIMECORR_old = data_ext1.field('TIMECORR')
## update headers
    ##TSTART, TSTOP, EXPOSURE, TELAPSE, LIVETIME
##DATE-OBS, DATE-END
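    # Apply the correction described in the docstring: cadence numbers at or
    # below the threshold get +66.184 s (Q0-13), later ones get +67.184 s
    # (after the Q14 leap second). Dividing by 86400 converts seconds to days,
    # the unit of the TIME column.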
if cadencetype == 'L':
offset = np.where(CADNUM <= 57139, 66.184, 67.184) / 86400.
elif cadencetype == 'S':
offset = np.where(CADNUM <= 1702663, 66.184, 67.184) / 86400.
TIME_right = TIME_wrong + offset
TIMECORR_new = TIMECORR_old + offset
instr[1].data['TIME'][:] = TIME_right
#we decided not to use the updated timecorr because
#it is different from the LC FITS files by ~1 ms.
    instr[1].data['TIMECORR'][:] = np.full(len(TIMECORR_old), np.nan)
#now to fix the header
tstart_right = instr[1].header['TSTART'] + offset[0]
tstop_right = instr[1].header['TSTOP'] + offset[-1]
telapse_right = tstop_right - tstart_right
instr[1].header['TSTART'] = tstart_right
instr[1].header['TSTOP'] = tstop_right
instr[1].header['TELAPSE'] = telapse_right
deadc = instr[1].header['DEADC']
instr[1].header['LIVETIME'] = telapse_right * deadc
    # get the DATE-OBS / DATE-END keywords (read here but not otherwise used)
dstart = instr[1].header['DATE-OBS']
dend = instr[1].header['DATE-END']
print("Writing output file {}...".format(outfile))
instr.writeto(outfile)
# end time
kepmsg.clock('KEPTIMEFIX completed at', logfile, verbose)
def keptimefix_main():
import argparse
parser = argparse.ArgumentParser(
description='Fix the time error in the target pixel files',
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of FITS input target pixel file',
type=str)
parser.add_argument('--outfile',
help=('Name of FITS target pixel file to output.'
' If None, outfile is infile-keptimefix.'),
default=None)
parser.add_argument('--overwrite', action='store_true',
help='overwrite a file with the same name as outfile?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', help='Name of ascii log file',
default='keptimefix.log', type=str)
args = parser.parse_args()
keptimefix(args.infile, args.outfile, args.overwrite, args.verbose,
args.logfile)
| mit |
ivano666/tensorflow | tensorflow/contrib/learn/python/learn/tests/test_early_stopping.py | 3 | 2577 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
class EarlyStoppingTest(tf.test.TestCase):
def testIrisES(self):
random.seed(42)
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.2)
val_monitor = learn.monitors.ValidationMonitor(X_val, y_val,
early_stopping_rounds=100)
# classifier without early stopping - overfitting
classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=1000)
classifier1.fit(X_train, y_train)
score1 = accuracy_score(y_test, classifier1.predict(X_test))
# classifier with early stopping - improved accuracy on testing set
classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=1000)
classifier2.fit(X_train, y_train, monitors=[val_monitor])
score2 = accuracy_score(y_test, classifier2.predict(X_test))
# self.assertGreater(score2, score1, "No improvement using early stopping.")
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
RPGOne/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
sinhrks/scikit-learn | sklearn/pipeline.py | 12 | 21283 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
                raise TypeError("All intermediate steps of the chain should "
                                "be transforms and implement fit and transform."
                                " '%s' (type %s) doesn't." % (t, type(t)))
if not hasattr(estimator, "fit"):
            raise TypeError("Last step of chain should implement fit. "
                            "'%s' (type %s) doesn't."
                            % (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
warn("From version 0.19, a 1d X will not be reshaped in"
" pipeline.inverse_transform any more.", FutureWarning)
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
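    # Duplicate class names are disambiguated with a numeric suffix assigned
    # from the end of the list backwards, so two PCA steps become
    # 'pca-1' and 'pca-2'.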
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
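    Examples
    --------
    A minimal sketch combining two decompositions; the toy data and the
    resulting shape are illustrative only::
        >>> import numpy as np
        >>> from sklearn.decomposition import PCA, TruncatedSVD
        >>> from sklearn.pipeline import FeatureUnion
        >>> X = np.array([[0., 1., 3.], [2., 2., 5.], [4., 1., 1.]])
        >>> union = FeatureUnion([("pca", PCA(n_components=1)),
        ...                       ("svd", TruncatedSVD(n_components=2))])
        >>> union.fit_transform(X).shape  # doctest: +SKIP
        (3, 3)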
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
jkthompson/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cm.py | 70 | 5385 | """
This module contains the instantiations of color mapping classes
"""
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import *
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None
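    For example, ``get_cmap('hot', 256)`` would return a 256-entry version of
    the 'hot' colormap; valid names are the keys of ``datad``.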
"""
if name is None: name = mpl.rcParams['image.cmap']
if lut is None: lut = mpl.rcParams['image.lut']
assert(name in datad.keys())
return colors.LinearSegmentedColormap(name, datad[name], lut)
class ScalarMappable:
"""
This is a mixin class to support scalar -> RGBA mapping. Handles
normalization and colormapping
"""
def __init__(self, norm=None, cmap=None):
"""
*norm* is an instance of :class:`colors.Normalize` or one of
its subclasses, used to map luminance to 0-1. *cmap* is a
:mod:`cm` colormap instance, for example :data:`cm.jet`
"""
self.callbacksSM = cbook.CallbackRegistry((
'changed',))
if cmap is None: cmap = get_cmap()
if norm is None: norm = colors.Normalize()
self._A = None
self.norm = norm
self.cmap = cmap
self.colorbar = None
self.update_dict = {'array':False}
def set_colorbar(self, im, ax):
'set the colorbar image and axes associated with mappable'
self.colorbar = im, ax
def to_rgba(self, x, alpha=1.0, bytes=False):
'''Return a normalized rgba array corresponding to *x*. If *x*
is already an rgb array, insert *alpha*; if it is already
rgba, return it unchanged. If *bytes* is True, return rgba as
4 uint8s instead of 4 floats.
'''
try:
if x.ndim == 3:
if x.shape[2] == 3:
if x.dtype == np.uint8:
alpha = np.array(alpha*255, np.uint8)
m, n = x.shape[:2]
xx = np.empty(shape=(m,n,4), dtype = x.dtype)
xx[:,:,:3] = x
xx[:,:,3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
return xx
except AttributeError:
pass
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
set the norm limits for image scaling; if *vmin* is a length2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin)==2):
vmin, vmax = vmin
if vmin is not None: self.norm.vmin = vmin
if vmax is not None: self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap
"""
if cmap is None: cmap = get_cmap()
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None: norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
| gpl-3.0 |
jat255/hyperspy_tools | hyperspy_tools/plotting.py | 1 | 14368 | # Copyright 2015 Joshua Taillon
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# #########################################################################
# This file contains the code necessary to load a DM survey image and plot
# it with Hyperspy, adding markers as necessary to show the spatial extents
# of the spectrum image, beam location, and spatial drift box
# #########################################################################
import seaborn as _sns
import matplotlib
from matplotlib.patches import Rectangle as _Rect
from mpl_toolkits.axes_grid1 import make_axes_locatable as _mal
import hyperspy
import hyperspy.api as _hs
import hyperspy.io_plugins.digital_micrograph as _dm
__all__ = ['fit_peak',
'add_colored_outlines',
'add_custom_colorbars',
'plot_dm3_survey_with_markers']
def fit_peak(sig, lower_bound, upper_bound, factor_num=None,):
"""
Fit a Gaussian peak in a range to a signal
Parameters
----------
sig: ~hyperspy.signal.Signal
Signal to fit
lower_bound: float
Lower bound of values in which to fit
upper_bound: float
Upper bound of values in which to fit
factor_num: int
if given, the fit will be performed on a decomposition component of
the signal (given by ``factor_num``), rather than the signal itself
Returns
-------
centre: float
center of the fitted Gaussian
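    Examples
    --------
    A minimal sketch; the file name and energy bounds are hypothetical::
        >>> import hyperspy.api as hs
        >>> s = hs.load('eels_spectrum.hdf5')  # doctest: +SKIP
        >>> centre = fit_peak(s, 280.0, 300.0)  # doctest: +SKIP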
"""
if factor_num is not None:
c1 = sig.get_decomposition_factors()[factor_num]
else:
c1 = sig
# noinspection PyUnresolvedReferences,PyProtectedMember
if isinstance(sig, hyperspy._signals.spectrum.Spectrum):
is_eels = False
else:
is_eels = True
if is_eels:
c1.set_microscope_parameters(beam_energy=200,
convergence_angle=12,
collection_angle=29)
m1 = c1.create_model(auto_background=False)
else:
m1 = c1.create_model()
g1 = _hs.model.components.Gaussian(centre=((float(lower_bound) +
upper_bound) / 2.0))
m1.append(g1)
m1.set_signal_range(lower_bound, upper_bound)
m1.fit()
m1.plot()
centre = g1.centre.value
return centre
def add_colored_outlines(fig,
signal,
num_images,
color_palette=_sns.color_palette(),
border=0.0,
lw=15):
"""
Add outlines to a matplotlib figure to make it easy to visualize with
the plotted spectra
Parameters
----------
fig: matplotlib.figure
figure to which to add outlines (this should not have colorbars,
add them later)
signal: ~hyperspy.signal.Signal
signal that has calibrated axes (in order to set bounds of rectangle)
num_images: int
number of images in figure
color_palette: list
list of colors to use for outlines
border: float
offset of rectangle from edge of data
lw: float
width of rectangle lines to plot
"""
x, y = signal.axes_manager[0].high_value, signal.axes_manager[1].high_value
for i in range(num_images):
ax = fig.get_axes()[num_images - (1 + i)]
# noinspection PyUnresolvedReferences
r = _Rect((border, border),
x - 1.5 * border,
y - 1.5 * border,
fill=False,
edgecolor=color_palette[num_images - (1 + i)],
alpha=1,
linewidth=lw)
ax.add_patch(r)
def add_custom_colorbars(fig,
tick_list=None):
"""
Add custom colorbars with ticks at specified values
Parameters
----------
fig: matplotlib.figure
figure to which to add colorbars (should not currently have colorbars)
tick_list: list
nested list with the position of ticks to be added to colorbars;
should have a length equal to the number of images in the figure
Example for a four plot figure:
>>> tick_list = [[120,200,280],
... [4,20,36],
... [0,8,16],
... [0,22,44]]
"""
for i, a in enumerate(fig.axes):
# if i == 2:
# a.get_images()[0].set_clim(0,16)
div = _mal(a)
cax = div.append_axes("right", size="5%", pad=0.05)
if tick_list is None:
_ = fig.colorbar(a.get_images()[0], cax=cax)
else:
# noinspection PyUnresolvedReferences
_ = fig.colorbar(a.get_images()[0], cax=cax, ticks=tick_list[i])
def plot_dm3_survey_with_markers(fname,
add_text=True,
plot_beam=True,
plot_si=True,
plot_drift=True,
x_offset=1.0,
y_offset=1.0,
im_scale=1.0,
text_size='xx-small',
**kwargs):
"""
Plot a hyperspy signal with the markers from digital micrograph enabled
Parameters
----------
fname : str
Name of .dm3 file to load
add_text : bool
Switch to control if labels for the markers are added to the plot
plot_beam : bool
Switch to control if beam point is plotted (if present)
plot_si : bool
Switch to control if spectrum image box or line is plotted (if present)
plot_drift : bool
Switch to control if spatial drift box is plotted (if present)
x_offset : float
multiplier to control how far the text will be offset from its
default position (in the x-direction)
y_offset : float
multiplier to control how far the text will be offset from its
default position (in the y-direction)
im_scale : float
will scale the survey image by a given factor (useful for correcting
image scale bars if the calibration is incorrect)
text_size : str or float
size of the text that will be written on the image (follows same
convention as the `Text
<http://matplotlib.org/1.3.0/api/artist_api.html#matplotlib.text.
Text.set_size>`_ matplotlib Artist
**kwargs
Other keyword arguments are passed to hs.signal.plot()
Returns
-------
im: hyperspy._signals.image.Image
HyperSpy signal with markers added. While the figure is open,
the figure can be saved with a call such as
``im._plot.signal_plot.figure.savefig()``
"""
def _add_beam(image, location, annotationtype):
if plot_beam:
beam_m = _hs.plot.markers.point(x=location[1],
y=location[0],
color='red')
image.add_marker(beam_m)
if add_text:
beam_text_m = _hs.plot.markers.text(x=location[1] - (0.5 *
x_offset),
y=location[0] - (1.5 *
y_offset),
color='red',
text='Beam',
size=text_size)
image.add_marker(beam_text_m)
else:
pass
def _add_si(image, location, annotationtype):
# adds a green rectangle (or line, if the coordinates are such) to
# image
if plot_si:
if annotationtype == 23: # Map
si_m = _hs.plot.markers.rectangle(x1=location[1],
y1=location[0],
x2=location[3],
y2=location[2],
color='#13FF00')
elif annotationtype == 25: # Line profile
si_m = _hs.plot.markers.line_segment(x1=location[1],
y1=location[0],
x2=location[3],
y2=location[2],
color='#13FF00')
else:
raise Exception("No spectrum image annotation found")
image.add_marker(si_m)
if add_text:
si_text_m = _hs.plot.markers.text(x=location[1],
y=location[0] - (0.5 *
y_offset),
color='#13FF00',
text='Spectrum Image',
size=text_size)
image.add_marker(si_text_m)
else:
pass
def _add_drift(image, location, annotationtype):
if plot_drift:
drift_m = _hs.plot.markers.rectangle(x1=location[1],
y1=location[0],
x2=location[3],
y2=location[2],
color='yellow')
image.add_marker(drift_m)
if add_text:
drift_text_m = _hs.plot.markers.text(x=location[1],
y=location[0] - (0.5 *
y_offset),
color='yellow',
text='Spatial Drift',
size=text_size)
image.add_marker(drift_text_m)
else:
pass
im = _hs.load(fname)
flist = _dm.file_reader(fname)
annotation_list = flist[0]['original_metadata']['DocumentObjectList'][
'TagGroup0']['AnnotationGroupList']
im.axes_manager[0].scale *= im_scale
im.axes_manager[1].scale *= im_scale
scale = im.axes_manager[0].scale
mapping = {
'Beam': _add_beam,
'Spectrum Image': _add_si,
'Spatial Drift': _add_drift
}
im.plot(cmap='gist_gray', **kwargs)
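    # Walk the DM3 annotation list and dispatch each labelled annotation
    # (Beam, Spectrum Image, Spatial Drift) to its plotting helper; entries
    # without a recognized label are skipped via the KeyError handler below.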
for i in range(len(annotation_list)):
try:
label = annotation_list['TagGroup' + str(i)]['Label']
loc = annotation_list['TagGroup' + str(i)]['Rectangle']
scaled_loc = [scale * i for i in loc]
annotationtype = annotation_list['TagGroup' + str(i)]['AnnotationType']
mapping[label](im, scaled_loc, annotationtype)
except KeyError:
pass
return im # Returns a hyperspy._signals.image.Image, and with it all
# editing and saving capabilities.
def plot_decomposition_results(signal,
list_of_comps,
labels=None,
factor_style='cascade',
loadings_cmap='cubehelix',
mva_type='decomposition',
loadings_kwargs={},
factors_kwargs={}):
"""
Plot the results of a area SI decomposition (components and factors)
Parameters
----------
signal: ~hyperspy.signal.Signal
2D signal for which to plot decomposition results
list_of_comps: list
should be a list of ints that defines which factors to plot. Can also
have a tuple (or another iterable) in place of an int, in which case
the prescribed components will be summed in the output
labels: list
list of labels to use for components
factor_style: string
string defining which plot_spectra style to use
loadings_cmap: string
colormap to use for the loadings plots
mva_type: string
'decomposition' or 'bss'. Defines which HyperSpy method to use
loadings_kwargs: dict
other keyword arguments passed to plot_images for the loadings plot
factors_kwargs: dict
other keyword arguments passed to plot_spectra for the factors plot
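    Examples
    --------
    A minimal sketch; the signal and component indices are illustrative::
        >>> # plot loadings/factors 0 and 1, plus the sum of components 2 and 3
        >>> plot_decomposition_results(s, [0, 1, (2, 3)],
        ...                            labels=['A', 'B', 'C+D'])  # doctest: +SKIP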
"""
if mva_type == 'decomposition':
l = signal.get_decomposition_loadings()
f = signal.get_decomposition_factors()
elif mva_type == 'bss':
l = signal.get_bss_loadings()
f = signal.get_bss_factors()
else:
raise ValueError('Did not understand mva_type: {}'.format(mva_type))
l_to_plot = []
f_to_plot = []
for i in list_of_comps:
try:
l_x = [l.inav[j] for j in iter(i)]
f_x = [f.inav[j] for j in iter(i)]
summed_comp = l_x[0]
summed_fact = f_x[0]
for j in range(1, len(l_x)):
summed_comp += l_x[j]
summed_fact += f_x[j]
l_to_plot.append(summed_comp)
f_to_plot.append(summed_fact)
        except TypeError:
l_to_plot.append(l.inav[i])
f_to_plot.append(f.inav[i])
_hs.plot.plot_images(l_to_plot,
cmap=loadings_cmap,
label=labels,
**loadings_kwargs)
_hs.plot.plot_spectra(f_to_plot,
style=factor_style,
legend=labels,
**factors_kwargs)
| gpl-3.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/core/indexes/timedeltas.py | 3 | 33026 | """ implement the TimedeltaIndex """
from datetime import timedelta
import numpy as np
from pandas.core.dtypes.common import (
_TD_DTYPE,
is_integer, is_float,
is_bool_dtype,
is_list_like,
is_scalar,
is_integer_dtype,
is_object_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
_ensure_int64)
from pandas.core.dtypes.missing import isnull
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.common import _maybe_box, _values_from_object, is_bool_indexer
from pandas.core.indexes.base import Index
from pandas.core.indexes.numeric import Int64Index
import pandas.compat as compat
from pandas.compat import u
from pandas.tseries.frequencies import to_offset
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.base import _shared_docs
from pandas.core.indexes.base import _index_shared_docs
import pandas.core.common as com
import pandas.core.dtypes.concat as _concat
from pandas.util._decorators import Appender, Substitution, deprecate_kwarg
from pandas.core.indexes.datetimelike import TimelikeOps, DatetimeIndexOpsMixin
from pandas.core.tools.timedeltas import (
to_timedelta, _coerce_scalar_to_timedelta_type)
from pandas.tseries.offsets import Tick, DateOffset
from pandas._libs import (lib, index as libindex, tslib as libts,
join as libjoin, Timedelta, NaT, iNaT)
def _td_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert timedelta-like to timedelta64
"""
def wrapper(self, other):
msg = "cannot compare a TimedeltaIndex with type {0}"
func = getattr(super(TimedeltaIndex, self), opname)
if _is_convertible_to_td(other) or other is NaT:
try:
other = _to_m8(other)
except ValueError:
# failed to parse as timedelta
raise TypeError(msg.format(type(other)))
result = func(other)
if isnull(other):
result.fill(nat_result)
else:
if not is_list_like(other):
raise TypeError(msg.format(type(other)))
other = TimedeltaIndex(other).values
result = func(other)
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == iNaT
else:
o_mask = other.view('i8') == iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
return wrapper
class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
    unit : str, optional
        Unit of the data (D, h, m, s, ms, us, ns) when it is passed as an
        integer or float number
freq: a frequency for the index, optional
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
name : object
Name to be stored in the index
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
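    Examples
    --------
    A minimal sketch (output shown for illustration)::
        >>> import pandas as pd
        >>> pd.TimedeltaIndex(['1 days', '2 days', '3 days'])  # doctest: +SKIP
        TimedeltaIndex(['1 days', '2 days', '3 days'],
                       dtype='timedelta64[ns]', freq=None)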
"""
_typ = 'timedeltaindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(
joinf, dtype='m8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
# define my properties & methods for delegation
_other_ops = []
_bool_ops = []
_object_ops = ['freq']
_field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["to_pytimedelta", "total_seconds",
"round", "floor", "ceil"]
__eq__ = _td_index_cmp('__eq__')
__ne__ = _td_index_cmp('__ne__', nat_result=True)
__lt__ = _td_index_cmp('__lt__')
__gt__ = _td_index_cmp('__gt__')
__le__ = _td_index_cmp('__le__')
__ge__ = _td_index_cmp('__ge__')
_engine_type = libindex.TimedeltaEngine
_comparables = ['name', 'freq']
_attributes = ['name', 'freq']
_is_numeric_dtype = True
_infer_as_myclass = True
freq = None
def __new__(cls, data=None, unit=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None,
closed=None, verify_integrity=True, **kwargs):
if isinstance(data, TimedeltaIndex) and freq is None and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, freq,
closed=closed)
if unit is not None:
data = to_timedelta(data, unit=unit, box=False)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if is_scalar(data):
raise ValueError('TimedeltaIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# convert if not already
if getattr(data, 'dtype', None) != _TD_DTYPE:
data = to_timedelta(data, unit=unit, box=False)
elif copy:
data = np.array(data, copy=True)
# check that we are matching freqs
if verify_integrity and len(data) > 0:
if freq is not None and not freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(
index[0], None, len(index), name, freq)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed '
'timedeltas does not conform to '
'passed frequency {1}'
.format(inferred, freq.freqstr))
index.freq = freq
return index
if freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred:
index.freq = to_offset(inferred)
return index
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate(cls, start, end, periods, name, offset, closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Must specify two of start, end, or periods')
if start is not None:
start = Timedelta(start)
if end is not None:
end = Timedelta(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
index = _generate_regular_range(start, end, periods, offset)
index = cls._simple_new(index, name=name, freq=offset)
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return index
@property
def _box_func(self):
return lambda x: Timedelta(x, unit='ns')
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
values = np.array(values, copy=False)
if values.dtype == np.object_:
values = libts.array_to_timedelta64(values)
if values.dtype != _TD_DTYPE:
values = _ensure_int64(values).view(_TD_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.freq = freq
result._reset_identity()
return result
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(TimedeltaIndex, self).__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
name = self.name
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
# update name when delta is index
name = com._maybe_match_name(self, delta)
else:
raise ValueError("cannot add the type {0} to a TimedeltaIndex"
.format(type(delta)))
result = TimedeltaIndex(new_values, freq='infer', name=name)
return result
def _evaluate_with_timedelta_like(self, other, op, opstr):
# allow division by a timedelta
if opstr in ['__div__', '__truediv__', '__floordiv__']:
if _is_convertible_to_td(other):
other = Timedelta(other)
if isnull(other):
raise NotImplementedError(
"division by pd.NaT not implemented")
i8 = self.asi8
if opstr in ['__floordiv__']:
result = i8 // other.value
else:
result = op(i8, float(other.value))
result = self._maybe_mask_results(result, convert='float64')
return Index(result, name=self.name, copy=False)
return NotImplemented
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
if other is NaT:
result = self._nat_new(box=False)
else:
other = Timestamp(other)
i8 = self.asi8
result = checked_add_with_arr(i8, other.value)
result = self._maybe_mask_results(result, fill_value=iNaT)
return DatetimeIndex(result, name=self.name, copy=False)
def _sub_datelike(self, other):
from pandas import DatetimeIndex
if other is NaT:
result = self._nat_new(box=False)
else:
raise TypeError("cannot subtract a datelike from a TimedeltaIndex")
return DatetimeIndex(result, name=self.name, copy=False)
def _format_native_types(self, na_rep=u('NaT'),
date_format=None, **kwargs):
from pandas.io.formats.format import Timedelta64Formatter
return Timedelta64Formatter(values=self,
nat_rep=na_rep,
justify='all').get_result()
def _get_field(self, m):
values = self.asi8
hasnans = self.hasnans
if hasnans:
result = np.empty(len(self), dtype='float64')
mask = self._isnan
imask = ~mask
result.flat[imask] = np.array(
[getattr(Timedelta(val), m) for val in values[imask]])
result[mask] = np.nan
else:
result = np.array([getattr(Timedelta(val), m)
for val in values], dtype='int64')
return Index(result, name=self.name)
@property
def days(self):
""" Number of days for each element. """
return self._get_field('days')
@property
def seconds(self):
""" Number of seconds (>= 0 and less than 1 day) for each element. """
return self._get_field('seconds')
@property
def microseconds(self):
"""
Number of microseconds (>= 0 and less than 1 second) for each
element. """
return self._get_field('microseconds')
@property
def nanoseconds(self):
"""
Number of nanoseconds (>= 0 and less than 1 microsecond) for each
element.
"""
return self._get_field('nanoseconds')
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self.hasnans
if hasnans:
def f(x):
if isnull(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self])
result.columns = columns
if not hasnans:
result = result.astype('int64')
return result
def total_seconds(self):
"""
Total duration of each element expressed in seconds.
.. versionadded:: 0.17.0
"""
return Index(self._maybe_mask_results(1e-9 * self.asi8),
name=self.name)
def to_pytimedelta(self):
"""
Return TimedeltaIndex as object ndarray of datetime.timedelta objects
Returns
-------
datetimes : ndarray
"""
return libts.ints_to_pytimedelta(self.asi8)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_timedelta64_ns_dtype(dtype):
if copy is True:
return self.copy()
return self
elif is_timedelta64_dtype(dtype):
# return an index (essentially this is division)
result = self.values.astype(dtype, copy=copy)
if self.hasnans:
return Index(self._maybe_mask_results(result,
convert='float64'),
name=self.name)
return Index(result.astype('i8'), name=self.name)
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), dtype='i8',
name=self.name)
raise ValueError('Cannot cast TimedeltaIndex to dtype %s' % dtype)
def union(self, other):
"""
        Specialized union for TimedeltaIndex objects. Where possible, adjacent
        or overlapping ranges with the same DateOffset are combined directly,
        which is much faster than Index.union
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
this, other = self, other
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, TimedeltaIndex):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
"""
See Index.join
"""
if _is_convertible_to_index(other):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
return Index.join(self, other, how=how, level=level,
return_indexers=return_indexers,
sort=sort)
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and
self._can_fast_union(other)):
joined = self._shallow_copy(joined, name=name)
return joined
else:
return self._simple_new(joined, name)
def _can_fast_union(self, other):
if not isinstance(other, TimedeltaIndex):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self._simple_new(result, name=name, freq=None)
def intersection(self, other):
"""
Specialized intersection for TimedeltaIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _maybe_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self.get_value_maybe_box(series, key)
try:
return _maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
values = self._engine.get_value(_values_from_object(series), key)
return _maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if is_bool_indexer(key) or is_timedelta64_dtype(key):
raise TypeError
if isnull(key):
key = NaT
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance)
if _is_convertible_to_td(key):
key = Timedelta(key)
return Index.get_loc(self, key, method, tolerance)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timedelta(key)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
"""
assert kind in ['ix', 'loc', 'getitem', None]
if isinstance(label, compat.string_types):
parsed = _coerce_scalar_to_timedelta_type(label, box=True)
lbound = parsed.round(parsed.resolution)
if side == 'left':
return lbound
else:
return (lbound + to_offset(parsed.resolution) -
Timedelta(1, 'ns'))
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice', label)
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
if is_integer(key) or is_float(key) or key is NaT:
self._invalid_indexer('slice', key)
loc = self._partial_td_slice(key, freq, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, compat.string_types):
return key
raise NotImplementedError
# TODO(wesm): dead code
# parsed = _coerce_scalar_to_timedelta_type(key, box=True)
# is_monotonic = self.is_monotonic
# # figure out the resolution of the passed td
# # and round to it
# # t1 = parsed.round(reso)
# t2 = t1 + to_offset(parsed.resolution) - Timedelta(1, 'ns')
# stamps = self.asi8
# if is_monotonic:
# # we are out of range
# if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
# t2.value < stamps[0]) or
# ((use_rhs and t1.value > stamps[-1] and
# t2.value > stamps[-1])))):
# raise KeyError
# # a monotonic (sorted) series can be sliced
# left = (stamps.searchsorted(t1.value, side='left')
# if use_lhs else None)
# right = (stamps.searchsorted(t2.value, side='right')
# if use_rhs else None)
# return slice(left, right)
# lhs_mask = (stamps >= t1.value) if use_lhs else True
# rhs_mask = (stamps <= t2.value) if use_rhs else True
# # try to find a the dates
# return (lhs_mask & rhs_mask).nonzero()[0]
@Substitution(klass='TimedeltaIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, (np.ndarray, Index)):
value = np.array(value, dtype=_TD_DTYPE, copy=False)
else:
value = _to_m8(value)
return self.values.searchsorted(value, side=side, sorter=sorter)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'timedelta'
@property
def inferred_type(self):
return 'timedelta64'
@property
def dtype(self):
return _TD_DTYPE
@property
def is_all_dates(self):
return True
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
            if not a timedelta-like value, the returned Index dtype will be
            object rather than timedelta64.
Returns
-------
new_index : Index
"""
# try to convert if possible
if _is_convertible_to_td(item):
try:
item = Timedelta(item)
except:
pass
freq = None
if isinstance(item, (Timedelta, libts.NaTType)):
# check freq can be preserved on edge cases
if self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item)
try:
new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError(
"cannot insert TimedeltaIndex with incompatible label")
def delete(self, loc):
"""
        Make a new TimedeltaIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : TimedeltaIndex
"""
new_tds = np.delete(self.asi8, loc)
freq = 'infer'
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
_ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
TimedeltaIndex._add_numeric_methods()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
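# A minimal illustrative sketch (the ``_demo_td_union_fast_path`` helper is
# hypothetical, added only to show the fast-union path documented above):
# two monotonic TimedeltaIndex objects with the same freq whose ranges
# adjoin can be combined without falling back to a full Index.union.
def _demo_td_union_fast_path():
    left = TimedeltaIndex(start='1 day', periods=3, freq='D')
    right = TimedeltaIndex(start='4 days', periods=3, freq='D')
    # ``right`` starts exactly one freq step after ``left`` ends, so
    # union() is expected to take _fast_union and preserve freq='D'
    return left.union(right)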
def _is_convertible_to_index(other):
"""
    return a boolean indicating whether conversion to a TimedeltaIndex
    can be attempted
"""
if isinstance(other, TimedeltaIndex):
return True
elif (len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer', 'integer',
'mixed-integer-float', 'mixed')):
return True
return False
def _is_convertible_to_td(key):
return isinstance(key, (DateOffset, timedelta, Timedelta,
np.timedelta64, compat.string_types))
def _to_m8(key):
"""
    Timedelta-like => np.timedelta64 (m8[ns])
"""
if not isinstance(key, Timedelta):
# this also converts strings
key = Timedelta(key)
    # return a type that can be compared
return np.int64(key.value).view(_TD_DTYPE)
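# A minimal illustrative sketch (the ``_demo_to_m8`` helper is hypothetical):
# _to_m8 turns any Timedelta-like scalar, including strings, into a numpy
# timedelta64[ns] scalar that can be compared against the index values.
def _demo_to_m8():
    val = _to_m8('1 hour')
    # ``val`` should be a numpy scalar with the index's m8[ns] dtype
    return val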
def _generate_regular_range(start, end, periods, offset):
stride = offset.nanos
if periods is None:
b = Timedelta(start).value
e = Timedelta(end).value
e += stride - e % stride
elif start is not None:
b = Timedelta(start).value
e = b + periods * stride
elif end is not None:
e = Timedelta(end).value + stride
b = e - periods * stride
else:
        raise ValueError("at least 'start' or 'end' should be specified "
                         "if 'periods' is given.")
data = np.arange(b, e, stride, dtype=np.int64)
data = TimedeltaIndex._simple_new(data, None)
return data
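# A minimal illustrative sketch (the ``_demo_generate_regular_range`` helper
# is hypothetical and assumes ``to_offset`` is available at module level, as
# used elsewhere in this file): the arithmetic above works purely on
# nanosecond integers derived from the offset's fixed stride.
def _demo_generate_regular_range():
    # four 6-hour steps starting at 1 day: 24h, 30h, 36h and 42h
    return _generate_regular_range(start='1 day', end=None, periods=4,
                                   offset=to_offset('6H'))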
def timedelta_range(start=None, end=None, periods=None, freq='D',
name=None, closed=None):
"""
Return a fixed frequency timedelta index, with day as the default
frequency
Parameters
----------
start : string or timedelta-like, default None
Left bound for generating dates
    end : string or timedelta-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
name : str, default None
Name of the resulting index
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Returns
-------
rng : TimedeltaIndex
Notes
-----
    Of the three parameters start, end, and periods, exactly two must be
    specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
return TimedeltaIndex(start=start, end=end, periods=periods,
freq=freq, name=name,
closed=closed)
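# A minimal usage sketch (the ``_demo_timedelta_range`` helper is
# hypothetical): two of start/end/periods are given and freq fixes the step,
# yielding 1 day, 1 day 6h, 1 day 12h and 1 day 18h.
def _demo_timedelta_range():
    return timedelta_range(start='1 day', periods=4, freq='6H')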
| mit |
wanggang3333/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
cl4rke/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
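# A minimal follow-up sketch: report the training accuracy of the last
# fitted model; ``clf`` here is the weights='distance' classifier from the
# final loop iteration.
print("Training accuracy (weights='distance'): %.3f" % clf.score(X, y))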
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/util/testing.py | 3 | 91023 | from __future__ import division
# pylint: disable-msg=W0402
import re
import string
import sys
import tempfile
import warnings
import inspect
import os
import subprocess
import locale
import traceback
from datetime import datetime
from functools import wraps, partial
from contextlib import contextmanager
from distutils.version import LooseVersion
from numpy.random import randn, rand
import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.dtypes.common import (
is_datetimelike_v_numeric,
is_datetimelike_v_object,
is_number, is_bool,
needs_i8_conversion,
is_categorical_dtype,
is_interval_dtype,
is_sequence,
is_list_like)
from pandas.io.formats.printing import pprint_thing
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.compat import (
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib, is_platform_windows, is_platform_32bit,
StringIO, PY3
)
from pandas.core.computation import expressions as expr
from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,
DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,
Index, MultiIndex,
Series, DataFrame, Panel, Panel4D)
from pandas._libs import testing as _testing
from pandas.io.common import urlopen
try:
import pytest
slow = pytest.mark.slow
except ImportError:
# Should be ok to just ignore. If you actually need
# slow then you'll hit an import error long before getting here.
pass
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__%s__.pickle' % rands(10))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
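# A minimal usage sketch (the ``_demo_round_trip_pickle`` helper is
# hypothetical): pickle a Series to a temporary path and read it back; the
# result should compare equal to the input.
def _demo_round_trip_pickle():
    s = Series([1, 2, 3])
    result = round_trip_pickle(s)
    assert_series_equal(result, s)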
def assert_almost_equal(left, right, check_exact=False,
check_dtype='equiv', check_less_precise=False,
**kwargs):
"""
Check that the left and right objects are approximately equal.
Parameters
----------
left : object
right : object
check_exact : bool, default True
Whether to compare number exactly.
check_dtype: bool, default True
check dtype if both a and b are the same type
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right, check_exact=check_exact,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# other sequences
if check_dtype:
if is_number(left) and is_number(right):
# do not compare numeric classes, like np.float64 and float
pass
elif is_bool(left) and is_bool(right):
# do not compare bool classes, like np.bool_ and bool
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = 'numpy array'
else:
obj = 'Input'
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
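# A minimal usage sketch (the ``_demo_assert_almost_equal`` helper is
# hypothetical): scalars, arrays, Index, Series and DataFrame inputs are all
# dispatched by the function above; with check_less_precise=True only three
# decimal digits are compared.
def _demo_assert_almost_equal():
    assert_almost_equal(1.000005, 1.0, check_less_precise=True)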
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{0} Expected type {1}, found {2} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(cls_name, cls, type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(cls_name, cls, type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
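# A minimal usage sketch (the ``_demo_random_strings`` helper is
# hypothetical): one 10-character random string plus a 3x2 object array of
# 5-character random strings.
def _demo_random_strings():
    one = rands(10)
    many = rands_array(nchars=5, size=(3, 2))
    return one, many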
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
def _skip_if_32bit():
import pytest
if is_platform_32bit():
pytest.skip("skipping for 32 bit")
def _skip_module_if_no_mpl():
import pytest
mpl = pytest.importorskip("matplotlib")
mpl.use("Agg", warn=False)
def _skip_if_no_mpl():
try:
import matplotlib as mpl
mpl.use("Agg", warn=False)
except ImportError:
import pytest
pytest.skip("matplotlib not installed")
def _skip_if_mpl_1_5():
import matplotlib as mpl
v = mpl.__version__
if v > LooseVersion('1.4.3') or v[0] == '0':
import pytest
pytest.skip("matplotlib 1.5")
else:
mpl.use("Agg", warn=False)
def _skip_if_no_scipy():
try:
import scipy.stats # noqa
except ImportError:
import pytest
pytest.skip("no scipy.stats module")
try:
import scipy.interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate missing')
try:
import scipy.sparse # noqa
except ImportError:
import pytest
pytest.skip('scipy.sparse missing')
def _check_if_lzma():
try:
return compat.import_lzma()
except ImportError:
return False
def _skip_if_no_lzma():
import pytest
return _check_if_lzma() or pytest.skip('need backports.lzma to run')
def _skip_if_no_xarray():
try:
import xarray
except ImportError:
import pytest
pytest.skip("xarray not installed")
v = xarray.__version__
if v < LooseVersion('0.7.0'):
import pytest
        pytest.skip("xarray version is too low: {0}".format(v))
def _skip_if_no_pytz():
try:
import pytz # noqa
except ImportError:
import pytest
pytest.skip("pytz not installed")
def _skip_if_no_dateutil():
try:
import dateutil # noqa
except ImportError:
import pytest
pytest.skip("dateutil not installed")
def _skip_if_windows_python_3():
if PY3 and is_platform_windows():
import pytest
pytest.skip("not used on python 3/win32")
def _skip_if_windows():
if is_platform_windows():
import pytest
pytest.skip("Running on Windows")
def _skip_if_no_pathlib():
try:
from pathlib import Path # noqa
except ImportError:
import pytest
pytest.skip("pathlib not available")
def _skip_if_no_localpath():
try:
from py.path import local as LocalPath # noqa
except ImportError:
import pytest
pytest.skip("py.path not installed")
def _incompat_bottleneck_version(method):
""" skip if we have bottleneck installed
and its >= 1.0
as we don't match the nansum/nanprod behavior for all-nan
ops, see GH9422
"""
if method not in ['sum', 'prod']:
return False
try:
import bottleneck as bn
return bn.__version__ >= LooseVersion('1.0')
except ImportError:
return False
def skip_if_no_ne(engine='numexpr'):
from pandas.core.computation.expressions import (
_USE_NUMEXPR,
_NUMEXPR_INSTALLED)
if engine == 'numexpr':
if not _USE_NUMEXPR:
import pytest
pytest.skip("numexpr enabled->{enabled}, "
"installed->{installed}".format(
enabled=_USE_NUMEXPR,
installed=_NUMEXPR_INSTALLED))
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
import pytest
pytest.skip("Specific locale is set {0}".format(lang))
def _skip_if_not_us_locale():
import locale
lang, _ = locale.getlocale()
if lang != 'en_US':
import pytest
pytest.skip("Specific locale is set {0}".format(lang))
def _skip_if_no_mock():
try:
import mock # noqa
except ImportError:
try:
from unittest import mock # noqa
except ImportError:
import nose
raise nose.SkipTest("mock is not installed")
def _skip_if_no_ipython():
try:
import IPython # noqa
except ImportError:
import nose
raise nose.SkipTest("IPython not installed")
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("%s, the 'locale -a' command cannot be found on your "
"system" % e)
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
found = re.compile('%s.*' % prefix).findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
try:
normalized_locale = locale.getlocale()
except ValueError:
yield new_locale
else:
if all(lc is not None for lc in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
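# A minimal usage sketch (the ``_demo_set_locale`` helper is hypothetical and
# assumes the locale is installed on the host): the locale is switched only
# inside the ``with`` block and restored afterwards.
def _demo_set_locale():
    if _can_set_locale('en_US.UTF-8'):
        with set_locale('en_US.UTF-8'):
            pass  # code here runs under en_US.UTF-8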
def _can_set_locale(lc):
"""Check to see if we can set a locale without throwing an exception.
Parameters
----------
lc : str
The locale to attempt to set.
Returns
-------
isvalid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc):
pass
    except locale.Error:  # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(_can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
def capture_stdout(f):
"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>>
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>>
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception as e:
print("Couldn't close file descriptor: %d (file: %s)" %
(fd, filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: %s" % e)
def get_data_path(f=''):
    """Return the path of a data file; these paths are relative to the
    current test directory.
"""
# get our callers file
_, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]
base_dir = os.path.abspath(os.path.dirname(filename))
return os.path.join(base_dir, 'data', f)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
    """Checks whether the sets of unique elements of arr1 and arr2 are equal.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
    exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.labels[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
raise_assert_detail(obj, '{0} levels are different'.format(obj),
'{0}, {1}'.format(left.nlevels, left),
'{0}, {1}'.format(right.nlevels, right))
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, '{0} length are different'.format(obj),
'{0}, {1}'.format(len(left), left),
'{0}, {1}'.format(len(right), right))
    # MultiIndex special comparison for more friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{0}]'.format(level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
if check_exact:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{0} values are different ({1} %)'\
.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_attr_equal('closed', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{0} category'.format(obj))
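# A minimal usage sketch (the ``_demo_assert_index_equal`` helper is
# hypothetical): with the default exact='equiv', a RangeIndex and an
# equivalent Int64Index are considered equal.
def _demo_assert_index_equal():
    assert_index_equal(RangeIndex(0, 3, 1), Index([0, 1, 2]))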
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = set([type(left).__name__, type(right).__name__])
if len(types - set(['Int64Index', 'RangeIndex'])):
msg = '{0} classes are not equivalent'.format(obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{0} classes are different'.format(obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
raise_assert_detail(obj, 'Attribute "{0}" are different'.format(attr),
left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ('one of \'objs\' is not a matplotlib Axes instance, '
'type encountered {0!r}')
assert isinstance(el, (plt.Axes, dict)), msg.format(
el.__class__.__name__)
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {0!r} '
''.format(objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
obj='Categorical', check_category_order=True):
"""Test that Categoricals are equivalent.
Parameters
----------
left, right : Categorical
Categoricals to compare
check_dtype : bool, default True
Check that integer dtype of the codes are the same
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{0}.categories'.format(obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{0}.codes'.format(obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{0}.categories'.format(obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{0}.values'.format(obj))
assert_attr_equal('ordered', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
if isinstance(left, np.ndarray):
left = pprint_thing(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
msg = """{0} are different
{1}
[left]: {2}
[right]: {3}""".format(obj, message, left, right)
if diff is not None:
msg = msg + "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
obj='numpy array', check_same=None):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
"""
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "%r is not %r" % (left_base, right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "%r is %r" % (left_base, right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{0} shapes are different'
.format(obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{0} values are different ({1} %)'\
.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default False
Whether to compare number exactly.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, 'Series length are different',
'{0}, {1}'.format(len(left), left.index),
'{0}, {1}'.format(len(right), right.index))
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.index'.format(obj))
if check_dtype:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{0}'.format(obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = '[datetimelike_compat=True] {0} is not equal to {1}.'
raise AssertionError(msg.format(left.values, right.values))
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
# TODO: big hack here
l = pd.IntervalIndex(left)
r = pd.IntervalIndex(right)
assert_index_equal(l, r, obj='{0}.index'.format(obj))
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{0}'.format(obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{0} category'.format(obj))
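# A minimal usage sketch (the ``_demo_assert_series_equal`` helper is
# hypothetical): identical values with different dtypes only pass when
# check_dtype=False.
def _demo_assert_series_equal():
    left = Series([1, 2, 3], dtype='int64')
    right = Series([1, 2, 3], dtype='float64')
    assert_series_equal(left, right, check_dtype=False)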
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""Check that left and right DataFrame are equal.
Parameters
----------
left : DataFrame
right : DataFrame
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If true, ignore the order of rows & columns
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'({0}, {1})'.format(*left.shape),
'({0}, {1})'.format(*right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.index'.format(obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.columns'.format(obj))
# compare by blocks
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {0}]'.format(i))
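# A minimal usage sketch (the ``_demo_assert_frame_equal`` helper is
# hypothetical): check_like=True ignores the ordering of rows and columns.
def _demo_assert_frame_equal():
    df1 = DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
    df2 = df1.loc[['y', 'x'], ['B', 'A']]
    assert_frame_equal(df1, df2, check_like=True)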
def assert_panelnd_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
assert_func=assert_frame_equal,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
assert_func : function for comparing data
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
assert item in right, "non-matching item (right) '%s'" % item
litem = left.iloc[i]
ritem = right.iloc[i]
assert_func(litem, ritem, check_less_precise=check_less_precise)
for i, item in enumerate(right._get_axis(0)):
assert item in left, "non-matching item (left) '%s'" % item
# TODO: strangely check_names fails in py3 ?
_panel_frame_equal = partial(assert_frame_equal, check_names=False)
assert_panel_equal = partial(assert_panelnd_equal,
assert_func=_panel_frame_equal)
assert_panel4d_equal = partial(assert_panelnd_equal,
assert_func=assert_panel_equal)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not left.sp_index.equals(right.sp_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left.sp_index, right.sp_index)
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{0}.index'.format(obj))
assert_sp_array_equal(left.block.values, right.block.values)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values)
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{0}.index'.format(obj))
assert_index_equal(left.columns, right.columns,
obj='{0}.columns'.format(obj))
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(series, right[col],
check_dtype=check_dtype)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
assert_attr_equal('default_fill_value', left, right, obj=obj)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
def assert_sp_list_equal(left, right):
assert isinstance(left, pd.SparseList)
assert isinstance(right, pd.SparseList)
assert_sp_array_equal(left.to_array(), right.to_array())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '%r'" % k
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
assert elem1 is not elem2, ("Expected object %r and "
"object %r to be different "
"objects, were same."
% (type(elem1), type(elem2)))
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None):
    """ make a length k CategoricalIndex of n candidate categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name)
def makeIntervalIndex(k=10, name=None):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None):
return RangeIndex(0, k, 1, name=name)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name)
def makeTimedeltaIndex(k=10, freq='D', name=None):
return TimedeltaIndex(start='1 day', periods=k, freq=freq, name=name)
def makePeriodIndex(k=10, name=None):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name)
return dr
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
    which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return dict((c, makeTimeSeries(nper, freq)) for c in getCols(K))
def getPeriodData(nper=None):
return dict((c, makePeriodSeries(nper)) for c in getCols(K))
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makeTimeDataFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makePeriodFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePanel4D(nper=None):
with warnings.catch_warnings(record=True):
d = dict(l1=makePanel(nper), l2=makePanel(nper),
l3=makePanel(nper))
return Panel4D(d)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
    label will be repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
    assert (names is None or names is False or
            names is True or len(names) == nlevels)
assert idx_type is None or \
(idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
    # make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
s=makeStringIndex, u=makeUnicodeIndex,
dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"%s" is not a legal value for `idx_type`, use '
'"i"/"f"/"s"/"u"/"dt/"p"/"td".' % idx_type)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all([x > 0 for x in ndupe_l])
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub("[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = prefix + '_l%d_g' % i + str(j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
index = Index(tuples[0], name=names[0])
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
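# A minimal usage sketch of ``makeCustomIndex`` (the helper name below is
# illustrative only): a 2-level MultiIndex whose first level repeats each
# label three times, and a plain DatetimeIndex via ``idx_type``.
def _example_makeCustomIndex():
    mi = makeCustomIndex(6, nlevels=2, names=True, ndupe_l=[3])
    dti = makeCustomIndex(5, nlevels=1, idx_type='dt')
    return mi, dti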
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
    nrows/ncols, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
    # 5 rows, 3 columns, default names on both, single index on both axes
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axes, single
    # index on both axes
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
(r_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
(c_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
idx_type=c_idx_type)
index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
names=r_idx_names, ndupe_l=r_ndupe_l,
idx_type=r_idx_type)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R%dC%d" % (r, c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1. / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
Parameters
----------
    density : float, optional
Float in (0, 1) that gives the percentage of non-missing numbers in
the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
dtype=dtype, c_idx_type=c_idx_type,
r_idx_type=r_idx_type)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density,
random_state=random_state)
df.values[i, j] = np.nan
return df
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
dm = panel[item]
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
return panel
def add_nans_panel4d(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
add_nans(panel)
return panel4d
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
# Dependency checker when running tests.
#
# Copied this from nipy/nipype
# Copyright of respective developers, License: BSD-3
def skip_if_no_package(pkg_name, min_version=None, max_version=None,
app='pandas', checker=LooseVersion):
"""Check that the min/max version of the required package is installed.
If the package check fails, the test is automatically skipped.
Parameters
----------
pkg_name : string
Name of the required package.
min_version : string, optional
Minimal version number for required package.
max_version : string, optional
Max version number for required package.
app : string, optional
Application that is performing the check. For instance, the
name of the tutorial being executed that depends on specific
packages.
checker : object, optional
The class that will perform the version checking. Default is
distutils.version.LooseVersion.
Examples
--------
    skip_if_no_package('numpy', '1.3')
"""
import pytest
if app:
msg = '%s requires %s' % (app, pkg_name)
else:
msg = 'module requires %s' % pkg_name
if min_version:
msg += ' with version >= %s' % (min_version,)
if max_version:
msg += ' with version < %s' % (max_version,)
try:
mod = __import__(pkg_name)
except ImportError:
mod = None
try:
have_version = mod.__version__
except AttributeError:
pytest.skip('Cannot find version for %s' % pkg_name)
if min_version and checker(have_version) < checker(min_version):
pytest.skip(msg)
if max_version and checker(have_version) >= checker(max_version):
pytest.skip(msg)
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
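# A minimal sketch of how ``optional_args`` is used (decorator and function
# names below are illustrative only): the same decorator works both bare and
# with keyword arguments.
def _example_optional_args():
    @optional_args
    def tag(f, label="default"):
        f.label = label
        return f
    @tag
    def test_a():
        pass
    @tag(label="slow")
    def test_b():
        pass
    return test_a.label, test_b.label  # ("default", "slow")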
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
'timed out',
'Server Hangup',
'HTTP Error 503: Service Unavailable',
'502: Proxy Error',
'HTTP Error 502: internal error',
'HTTP Error 502',
'HTTP Error 503',
'HTTP Error 403',
'HTTP Error 400',
'Temporary failure in name resolution',
'Name or service not known',
'Connection refused',
'certificate verify',
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flaky
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
if sys.version_info >= (3, 3):
_network_error_classes += (TimeoutError,) # noqa
def can_connect(url, error_classes=_network_error_classes):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(t, url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=_network_error_classes,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, 'errno', None)
if not errno and hasattr(errno, "reason"):
errno = getattr(e.reason, 'errno', None)
if errno in skip_errnos:
skip("Skipping test due to known errno"
" and error %s" % e)
            try:
                e_str = traceback.format_exc()
            except Exception:
                e_str = str(e)
if any([m.lower() in e_str.lower() for m in _skip_on_messages]):
skip("Skipping test because exception "
"message is known and error %s" % e)
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip("Skipping test due to lack of connectivity"
" and error %s" % e)
return wrapper
with_connectivity_check = network
class SimpleMock(object):
"""
Poor man's mocking object
Note: only works for new-style classes, assumes __getattribute__ exists.
>>> a = type("Duck",(),{})
>>> a.attr1,a.attr2 ="fizz","buzz"
>>> b = SimpleMock(a,"attr1","bar")
>>> b.attr1 == "bar" and b.attr2 == "buzz"
True
>>> a.attr1 == "fizz" and a.attr2 == "buzz"
True
"""
def __init__(self, obj, *args, **kwds):
assert(len(args) % 2 == 0)
attrs = kwds.get("attrs", {})
for k, v in zip(args[::2], args[1::2]):
# dict comprehensions break 2.6
attrs[k] = v
self.attrs = attrs
self.obj = obj
def __getattribute__(self, name):
attrs = object.__getattribute__(self, "attrs")
obj = object.__getattribute__(self, "obj")
return attrs.get(name, type(obj).__getattribute__(obj, name))
@contextmanager
def stdin_encoding(encoding=None):
"""
Context manager for running bits of code while emulating an arbitrary
stdin encoding.
>>> import sys
>>> _encoding = sys.stdin.encoding
>>> with stdin_encoding('AES'): sys.stdin.encoding
'AES'
>>> sys.stdin.encoding==_encoding
True
"""
import sys
_stdin = sys.stdin
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
def assert_raises_regex(_exception, _regexp, _callable=None,
*args, **kwargs):
"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
for use by `re.search()`.
This is a port of the `assertRaisesRegexp` function from unittest in
Python 2.7. However, with our migration to `pytest`, please refrain
from using this. Instead, use the following paradigm:
with pytest.raises(_exception) as exc_info:
func(*args, **kwargs)
    exc_info.match(reg_exp)
Examples
--------
>>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
>>> import re
>>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')
If an exception of a different type is raised, it bubbles up.
>>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assert_raises_regex(TypeError, 'unsupported operand type\(s\)'):
... 1 + {}
>>> with assert_raises_regex(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager(object):
"""
Context manager behind `assert_raises_regex`.
"""
def __init__(self, exception, regexp=None):
"""
Initialize an _AssertRaisesContextManager instance.
Parameters
----------
exception : class
The expected Exception class.
regexp : str, default None
The regex to compare against the Exception message.
"""
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace_back):
expected = self.exception
if not exc_type:
exp_name = getattr(expected, "__name__", str(expected))
raise AssertionError("{0} not raised.".format(exp_name))
return self.exception_matches(exc_type, exc_value, trace_back)
def exception_matches(self, exc_type, exc_value, trace_back):
"""
Check that the Exception raised matches the expected Exception
and expected error message regular expression.
Parameters
----------
exc_type : class
The type of Exception raised.
exc_value : Exception
The instance of `exc_type` raised.
trace_back : stack trace object
The traceback object associated with `exc_value`.
Returns
-------
is_matched : bool
Whether or not the Exception raised matches the expected
Exception class and expected error message regular expression.
Raises
------
AssertionError : The error message provided does not match
the expected error message regular expression.
"""
if issubclass(exc_type, self.exception):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
e = AssertionError('"%s" does not match "%s"' %
(self.regexp.pattern, str(val)))
raise_with_traceback(e, trace_back)
return True
else:
# Failed, so allow Exception to bubble up.
return False
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
    raise a warning. Defaults to ``Warning``, the base class of all
    warnings. (Basically a wrapper around ``warnings.catch_warnings``.)
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
if check_stacklevel and issubclass(actual_warning.category,
(FutureWarning,
DeprecationWarning)):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = ("Warning not set with correct stacklevel. "
"File where warning is raised: {0} != {1}. "
"Warning message: {2}".format(
actual_warning.filename, caller.filename,
actual_warning.message))
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
assert not extra_warnings, ("Caused unexpected warning(s): %r."
% extra_warnings)
class RNGContext(object):
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def use_numexpr(use, min_elements=expr._MIN_ELEMENTS):
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
    try:
        yield
    finally:
        expr._MIN_ELEMENTS = oldmin
        expr.set_use_numexpr(olduse)
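# A minimal sketch (``df1`` and ``df2`` are assumed to be existing DataFrames):
# temporarily disable numexpr-backed evaluation for one operation, restoring
# the previous settings on exit.
def _example_use_numexpr(df1, df2):
    with use_numexpr(False):
        return df1 + df2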
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args,
kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
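# A minimal usage sketch of ``test_parallel`` (names are illustrative only):
# run the same body from four threads at once, e.g. as a thread-safety smoke
# test. The decorator discards return values, so results are collected via
# side effects.
def _example_test_parallel():
    results = []
    @test_parallel(num_threads=4)
    def append_one():
        results.append(1)
    append_one()
    return len(results)  # 4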
class SubclassedSeries(Series):
_metadata = ['testattr', 'name']
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedSparseSeries(pd.SparseSeries):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseSeries
@property
def _constructor_expanddim(self):
return SubclassedSparseDataFrame
class SubclassedSparseDataFrame(pd.SparseDataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseDataFrame
@property
def _constructor_sliced(self):
return SubclassedSparseSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def patch(ob, attr, value):
"""Temporarily patch an attribute of an object.
Parameters
----------
ob : any
The object to patch. This must support attribute assignment for `attr`.
attr : str
The name of the attribute to patch.
value : any
The temporary attribute to assign.
Examples
--------
>>> class C(object):
... attribute = 'original'
...
>>> C.attribute
'original'
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
...
>>> in_context
'patched'
>>> C.attribute # the value is reset when the context manager exists
'original'
Correctly replaces attribute when the manager exits with an exception.
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
... raise ValueError()
Traceback (most recent call last):
...
ValueError
>>> in_context
'patched'
>>> C.attribute
'original'
"""
noattr = object() # mark that the attribute never existed
old = getattr(ob, attr, noattr)
setattr(ob, attr, value)
try:
yield
finally:
if old is noattr:
delattr(ob, attr)
else:
setattr(ob, attr, old)
@contextmanager
def set_timezone(tz):
"""Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
if is_platform_windows():
import pytest
pytest.skip("timezone setting not supported on windows")
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except:
pass
else:
os.environ['TZ'] = tz
time.tzset()
orig_tz = os.environ.get('TZ')
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
| agpl-3.0 |
trmznt/msaf | msaf/lib/microsatellite/traceutils.py | 1 | 5746 |
from math import factorial
import numpy as np
def b(txt):
return txt.encode('UTF-8')
def smooth_signal( raw_signal ):
return savitzky_golay( raw_signal, 11, 7 )
def separate_channels( trace ):
# return a list of [ 'dye name', dye_wavelength, numpy_array, numpy_smooth ]
results = []
for (idx, data_idx) in [ (1,1), (2,2), (3,3), (4,4), (5,105) ]:
try:
dye_name = trace.get_data(b('DyeN%d' % idx)).decode('UTF-8')
dye_wavelength = trace.get_data(b('DyeW%d' % idx))
raw_channel = np.array( trace.get_data(b('DATA%d' % data_idx)) )
sg_channel = smooth_signal( raw_channel )
results.append( (dye_name, dye_wavelength, raw_channel, sg_channel) )
except KeyError:
pass
return results
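# A minimal sketch (``trace`` is assumed to be an ABIF-style object exposing
# the ``get_data`` accessor used above; the helper name is illustrative only).
def _example_separate_channels(trace):
    for dye_name, dye_wavelength, raw_channel, smoothed in separate_channels(trace):
        print(dye_name, dye_wavelength, len(raw_channel), len(smoothed))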
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-squares fit with a
    polynomial of high order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
    t=linspace(-2,2,50)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError( "smooth only accepts 1 dimension arrays." )
if x.size < window_len:
raise ValueError( "Input vector needs to be bigger than window size." )
if window_len<3:
return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError( "Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" )
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
        w = getattr(np, window)(window_len)
y=np.convolve(w/w.sum(),s,mode='valid')
return y
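# A minimal sketch (helper name is illustrative only) of the length correction
# suggested in the NOTE above: trim the edges introduced by the reflected
# padding so the smoothed signal lines up with the input.
def _example_smooth_trimmed(x, window_len=11):
    y = smooth(x, window_len=window_len)
    return y[(window_len // 2 - 1):-(window_len // 2)]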
| lgpl-3.0 |
jlegendary/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
Windy-Ground/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
michalkurka/h2o-3 | h2o-py/tests/testdir_scikit_grid/pyunit_scal_svd_rf_grid.py | 2 | 3661 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.transforms.preprocessing import H2OScaler
from h2o.estimators.random_forest import H2ORandomForestEstimator
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RandomizedSearchCV
from h2o.cross_validation import H2OKFold
from h2o.model.regression import h2o_r2_score
from sklearn.metrics.scorer import make_scorer
from scipy.stats import randint
def scale_svd_rf_pipe():
from h2o.transforms.decomposition import H2OSVD
print("Importing USArrests.csv data...")
arrests = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
# build transformation pipeline using sklearn's Pipeline and H2OSVD
pipe = Pipeline([
("standardize", H2OScaler()),
("svd", H2OSVD()),
("rf", H2ORandomForestEstimator())
])
params = {"standardize__center": [True, False],
"standardize__scale": [True, False],
"svd__nv": [2, 3],
"rf__ntrees": randint(50,60),
"rf__max_depth": randint(4,8),
"rf__min_rows": randint(5,10),
"svd__transform": ["none", "standardize"],
}
custom_cv = H2OKFold(arrests, n_folds=5, seed=42)
random_search = RandomizedSearchCV(pipe,
params,
n_iter=5,
scoring=make_scorer(h2o_r2_score),
cv=custom_cv,
random_state=42,
n_jobs=1)
random_search.fit(arrests[1:],arrests[0])
print(random_search.best_estimator_)
def scale_svd_rf_pipe_new_import():
from h2o.estimators.svd import H2OSingularValueDecompositionEstimator
print("Importing USArrests.csv data...")
arrests = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
print("Compare with SVD")
# build transformation pipeline using sklearn's Pipeline and H2OSingularValueDecompositionEstimator
pipe = Pipeline([
("standardize", H2OScaler()),
# H2OSingularValueDecompositionEstimator() call will fail, you have to call init_for_pipeline method
("svd", H2OSingularValueDecompositionEstimator().init_for_pipeline()),
("rf", H2ORandomForestEstimator())
])
params = {"standardize__center": [True, False],
"standardize__scale": [True, False],
"svd__nv": [2, 3],
"rf__ntrees": randint(50,60),
"rf__max_depth": randint(4,8),
"rf__min_rows": randint(5,10),
"svd__transform": ["none", "standardize"],
}
custom_cv = H2OKFold(arrests, n_folds=5, seed=42)
random_search = RandomizedSearchCV(pipe,
params,
n_iter=5,
scoring=make_scorer(h2o_r2_score),
cv=custom_cv,
random_state=42,
n_jobs=1)
random_search.fit(arrests[1:], arrests[0])
print(random_search.best_estimator_)
if __name__ == "__main__":
pyunit_utils.standalone_test(scale_svd_rf_pipe)
pyunit_utils.standalone_test(scale_svd_rf_pipe_new_import)
else:
scale_svd_rf_pipe()
scale_svd_rf_pipe_new_import()
| apache-2.0 |
RayMick/scikit-learn | sklearn/utils/__init__.py | 79 | 14202 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
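# A minimal sketch (class and function names are illustrative only):
# ``deprecated`` wraps classes as well as functions; instantiating
# ``OldEstimator`` or calling ``old_function`` emits a DeprecationWarning.
def _example_deprecated():
    @deprecated("use a newer estimator instead")
    class OldEstimator(object):
        pass
    @deprecated()
    def old_function():
        pass
    return OldEstimator, old_function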
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
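# A minimal sketch (data below is illustrative only): for a sparse matrix, a
# boolean mask is converted to integer indices, which are safe for indexing.
def _example_safe_mask():
    from scipy.sparse import csr_matrix
    X = csr_matrix(np.eye(3))
    mask = np.array([True, False, True])
    return X[safe_mask(X, mask)]  # rows 0 and 2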
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
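# A minimal sketch (data below is illustrative only, and assumes pandas is
# importable): the same call indexes a list, a NumPy array and a DataFrame.
def _example_safe_indexing():
    import pandas as pd
    indices = np.array([2, 0])
    as_list = safe_indexing(['a', 'b', 'c'], indices)
    as_array = safe_indexing(np.array([10, 20, 30]), indices)
    as_frame = safe_indexing(pd.DataFrame({'x': [1, 2, 3]}), indices)
    return as_list, as_array, as_frame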
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
AlexandreAbraham/brainhack2013 | craddock_things.py | 1 | 3175 | import numpy as np
import scipy as sp
import scipy.sparse
import scipy.stats
import nibabel as nb
import warnings
from sklearn.cluster import spectral_clustering
from sklearn.feature_extraction import image
import nilearn.input_data.multi_nifti_masker as mnm
def compute_similarity_matrix(mask_data, subj_data):
'''
Compute sparse similarity matrix
Parameters
----------
subj_data: fmri data for a given subject
mask_data: mask to use on the data
Returns
-------
correlations_matrix: matrix of similarity
'''
mask_sh = mask_data.shape
connectivity = image.grid_to_graph(n_x=mask_sh[0], n_y=mask_sh[1],
n_z=mask_sh[2], mask=mask_data)
values = []
R = connectivity.row
C = connectivity.col
# Fill the matrix
for r, c in zip(R, C):
corr_coeff = sp.stats.pearsonr(subj_data[:, r], subj_data[:, c])[0]
if np.isnan(corr_coeff):
warnings.warn("NaN correlation present --> replaced by 0")
corr_coeff = 0
values.append(corr_coeff)
values = np.array(values)
pos = np.where(values < 0)
if pos[0].size > 0:
warnings.warn("Negative correlations present --> replace them by"
"exp(corr/mean_corr)")
values[pos] = np.exp(values[pos] / values.mean())
# Note: `values` is a 1-D array here, so the transpose is a no-op and this line
# leaves `values` unchanged; the assembled matrix is symmetric because the
# connectivity graph lists both (r, c) and (c, r) pairs.
values = .5 * (values + values.T)
Jvox = subj_data.shape[1]
correlations_matrix = sp.sparse.coo_matrix((values, (R, C)),
shape=(Jvox, Jvox))
return correlations_matrix
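# Usage sketch (shapes are illustrative): with mask_data a 3-D mask volume and
# subj_data a (n_timepoints, n_voxels) array,
#     W = compute_similarity_matrix(mask_data, subj_data)
# yields a sparse (n_voxels, n_voxels) affinity matrix restricted to the grid
# connectivity of the mask, suitable as input to spectral_clustering.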
#filenames
mask_fn = 'gm_maskfile.nii.gz'
subj_files = ['subject1.nii.gz', 'subject2.nii.gz', 'subject3.nii.gz']
#load the data
mask_data = nb.load(mask_fn).get_data()
mask_sh = mask_data.shape
nifti_masker = mnm.MultiNiftiMasker(mask_fn)
Nm = nifti_masker.fit(subj_files)
Masked_data = Nm.fit_transform(subj_files) # shape N*J (nb_pts_tempo*nbvox)
print 'Individual clusterings'
for isubj, subj_fn in enumerate(subj_files):
print 'subject nb ', isubj, '----'
#example on one subject - test
subj_data = Masked_data[isubj]
N = subj_data.shape[0]
#prepare for Pearson coefficient computation
subj_data_std = sp.std(subj_data, 0)
# replace 0 with large number to avoid div by zero
subj_data_std[subj_data_std == 0] = 1000000
subj_data_mean = sp.mean(subj_data, 0)
subj_data = (subj_data - subj_data_mean) / subj_data_std
#compute correlations matrix
print 'compute correlation matrix'
corr_matrix_subj = compute_similarity_matrix(mask_data, subj_data)
if isubj == 0:
corr_matrices = corr_matrix_subj
else:
corr_matrices = corr_matrices + corr_matrix_subj
#spectral clustering
print 'perform clustering'
#labels = spectral_clustering(corr_matrix, n_clusters=N)
#Groupe-level spectral clustering
K = [100, 150]
#average similarity matrices (corr_matrices already holds the sum over subjects)
average_matrix = corr_matrices / float(len(subj_files))
#cluster average matrix
print 'Group-level clustering'
for k in K:
print 'Nb of clusters:', k
group_labels = spectral_clustering(average_matrix, n_clusters=k)
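# Possible follow-up (sketch): the group parcellation could be written back to
# brain space with the fitted masker, e.g.
#     parcel_img = Nm.inverse_transform(group_labels[np.newaxis, :])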
| bsd-3-clause |
RRShieldsCutler/clusterpluck | clusterpluck/scripts/mpi_parallel_collapse.py | 1 | 4144 | #!/usr/bin/env Python
import argparse
import sys
import numpy as np
import pandas as pd
import warnings
from clusterpluck.scripts.cluster_dictionary import build_cluster_map
from clusterpluck.scripts.orfs_in_common import generate_index_list
from clusterpluck.scripts.orfs_in_common import pick_a_cluster
from functools import partial
from scoop import futures
# The arg parser
def make_arg_parser():
parser = argparse.ArgumentParser(description='Collapse ORF matrix into a scored cluster matrix. Run with "python -m scoop -vv -n 480 mpi_parallel_collapse [args]"')
parser.add_argument('-i', '--input', help='Input is the ORF matrix CSV file.', default='-')
parser.add_argument('-m', '--mpfa', help='The multi-protein fasta file (.mpfa) from which to build the dictionary')
parser.add_argument('-b', '--bread', help='Where to find the cluster information in the header for the sequence (default="ref|,|")', default='ref|,|')
parser.add_argument('-o', '--output', help='Where to save the output csv; default to screen', required=False, default='-')
return parser
def parallel_clustermean(cluster2, mx):
# subsets the smaller matrix by rows belonging to one cluster
mx_dubsub = mx.filter(like=cluster2, axis=0)
# finds the mean of the cells in the cluster x cluster2 matrix
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning) # np doesn't like taking mean of empty slices
cc_mean = np.nanmean(mx_dubsub.values, dtype='float64')
# saves this ratio into the pre-existing array at the (cluster, cluster2) location
mat = np.zeros((1, 1))
mat[0, 0] = cc_mean
return pd.DataFrame(mat)
def big_cluster_v_cluster(inf3, grab, inkey, c_list, j):
mx = pd.read_csv(inf3, sep=',', header=0, usecols=grab, engine='c') # loads in only the columns from the grab list, i.e. all cols for a unique cluster
mx.index = inkey # reindexes the df with the orf labels after importing specific columns with usecols
# how many orfs in the full cluster
# args_list = [mx] # organizes all the arguments that the parallelized function needs into a list
if __name__ == '__main__':
results = list(futures.map(partial(parallel_clustermean, mx=mx), c_list))
bigmat = pd.concat(results, axis=0) # stack all the results into a single column in a dataframe
# print(bigmat.shape[0])
bigmat.index = c_list # now the index is just the clusters, not the orfs
# DEBUG - will print the progress every 50 clusters (across the slower dimension).
if j == 0:
print('Processed first cluster... moving on!')
elif j == 1:
print('Worry not, second cluster has been processed...')
elif j % 50:
pass
else:
print('Processed %d clusters' % j)
del mx
return bigmat
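# Note: the returned `bigmat` is a one-column DataFrame of mean ORF-vs-ORF
# scores for every cluster against the cluster selected by `grab`, indexed by
# cluster name; main() concatenates one such column per cluster into the full
# cluster-by-cluster matrix.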
def main():
parser = make_arg_parser()
args = parser.parse_args()
# Parse command line
with open(args.mpfa, 'r') as inf:
# Generates dictionary with each unique 'refseq_cluster' as keys, ORFs as values
cluster_map = build_cluster_map(inf, bread=args.bread)
with open(args.input, 'r') as in_csv:
with open(args.output, 'w') if args.output != '-' else sys.stdout as outf:
print('\nOk, processing input file in pieces...\n')
inkey = generate_index_list(in_csv)
c_list = list(cluster_map.keys())
ct = len(c_list)
print('Found %d clusters...' % ct)
results_list = []
j = 0
for cluster in c_list:
grab = pick_a_cluster(inkey, cluster) # uses the name of the cluster to get a list of all orfs for a particular unique cluster
# print(grab)
with open(args.input, 'r') as inf3:
bigmat = big_cluster_v_cluster(inf3, grab, inkey, c_list, j)
# print(bigmat)
results_list.append(bigmat) # returns a list of dataframes, one for each cluster column
j += 1
print('File processing complete; writing output file...\n')
outdf = pd.concat(results_list, axis=1)
outdf.columns = c_list # names the columns (and index, next line) according to clusters in the order they were processed
outdf.index = c_list
outdf.sort_index(axis=0, inplace=True)
outdf.sort_index(axis=1, inplace=True)
outdf = outdf.round(decimals=2)
outdf.to_csv(outf)
if __name__ == '__main__':
main()
| mit |
bjlittle/iris | lib/iris/tests/unit/quickplot/test_pcolormesh.py | 1 | 1688 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.quickplot.pcolormesh` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import MixinCoords, TestGraphicStringCoord
if tests.MPL_AVAILABLE:
import iris.quickplot as qplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
qplt.pcolormesh(self.cube, coords=("bar", "str_coord"))
self.assertBoundsTickLabels("yaxis")
def test_xaxis_labels(self):
qplt.pcolormesh(self.cube, coords=("str_coord", "bar"))
self.assertBoundsTickLabels("xaxis")
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=True)
coord = self.cube.coord("foo")
self.foo = coord.contiguous_bounds()
self.foo_index = np.arange(coord.points.size + 1)
coord = self.cube.coord("bar")
self.bar = coord.contiguous_bounds()
self.bar_index = np.arange(coord.points.size + 1)
self.data = self.cube.data
self.dataT = self.data.T
self.mpl_patch = self.patch(
"matplotlib.pyplot.pcolormesh", return_value=None
)
self.draw_func = qplt.pcolormesh
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
garibaldu/multicauseRBM | Max/rbmpy/plotter.py | 1 | 6666 | # You're a wizard Harry plotter
import numpy as np
import matplotlib, math
import time, datetime
import matplotlib.pyplot as plt
class Plot(object):
def __init__(self, data, title = None, subplot_titles = None, c_map = 'copper'):
subplot_size = int(math.sqrt(data.shape[1]))
subplot_shape = (subplot_size, subplot_size)
if len(data.shape) > 2:
subplot_shape = (data.shape[1], data.shape[2])
self.data = data
self.subplot_shape = subplot_shape
self.title = title
self.subplot_titles = subplot_titles
self.c_map = c_map
def plot_all(self):
# Render every item in self.data on the default grid used by plot()
num_rows, num_cols = 6, 9
i = 0
for r in range(num_rows):
for c in range(num_cols):
if i < len(self.data):
self.plot_subplot(i, num_rows, num_cols)
else:
break
i = i + 1
plt.show()
def plot_subplot(self, grid_idx, num_rows = 6, num_cols = 9):
plt.subplot(num_rows, num_cols, grid_idx + 1)
plt.imshow(np.reshape(self.data[grid_idx], self.subplot_shape), interpolation = 'nearest', cmap = self.c_map, vmin = 0, vmax = 1)
plt.axis('off')
if self.subplot_titles is not None:
plt.title(self.subplot_titles[grid_idx])
def interpolation_demo(methods = ('nearest', 'bilinear', 'bicubic'), num_rows = 1, num_cols = 3):
# Small demo: show one random grid under several interpolation methods
# (the function name, default method list and grid shape are illustrative).
grid = np.random.rand(4, 4)
fig, axes = plt.subplots(num_rows, num_cols, figsize=(12, 6), subplot_kw={'xticks': [], 'yticks': []})
fig.subplots_adjust(hspace=0.3, wspace=0.05)
for ax, interp_method in zip(axes.flat, methods):
ax.imshow(grid, interpolation=interp_method)
ax.set_title(interp_method)
plt.show()
class Plotter(object):
def __init__(self):
self.plots = []
def add_plot(self, plot):
self.plots.append(plot)
def plot_all(self):
for plot in self.plots:
plot.plot_all()
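# Usage sketch (`samples` is illustrative data): p = Plotter();
# p.add_plot(Plot(samples, title='reconstructions')); p.plot_all()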
def plot_weights(weights, vis_dim = 28):
subplot_size = int(math.sqrt(weights.shape[1]))
n_hidden = weights.shape[0]
n_vis = weights.shape[1]
n_vis_dim = vis_dim
W_per_h = weights.reshape(n_hidden,n_vis_dim, n_vis_dim)
nc = int(math.sqrt(n_hidden))
nr = int(math.ceil(n_hidden / float(nc)))
i=0
for r in range(int(nr)):
for c in range(nc):
if i < n_hidden:
plt.subplot(nr,nc,i+1)
plt.imshow(W_per_h[i],interpolation='nearest', cmap='seismic')
plt.axis('off')
# plt.title('hid {}'.format(i))
i = i+1
plt.show()
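# Usage sketch (`rbm_weights` is an illustrative name): plot_weights(rbm_weights,
# vis_dim=28) for a weight matrix of shape (n_hidden, 784); each panel shows one
# hidden unit's weight vector reshaped to vis_dim x vis_dim.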
def plot(data, subplot_titles = None, title = None,num_rows = 6, num_cols = 9):
# if it's not the shape i expect
subplot_size = int(math.sqrt(data.shape[1]))
subplot_shape = (subplot_size, subplot_size)
if len(data.shape) > 2:
subplot_shape = (data.shape[1], data.shape[2])
i = 0
for r in range(num_rows):
for c in range(num_cols):
if i < len(data):
if title is not None: plt.suptitle(title)
plt.subplot(num_rows,num_cols, i + 1)
plt.imshow(np.reshape(data[i],subplot_shape), interpolation = 'nearest', cmap = 'copper', vmin = 0, vmax = 1)
plt.axis('off')
if subplot_titles is not None:
plt.title(subplot_titles[i])
i = i + 1
plt.show()
def save_plot(data, subplot_titles = None, title = None,num_rows = 6, num_cols = 9, filename = None):
# if it's not the shape i expect
if(filename == None):
filename = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H:%M:%S') + ".png"
subplot_size = int(math.sqrt(data.shape[1]))
subplot_shape = (subplot_size, subplot_size)
if len(data.shape) > 2:
subplot_shape = (data.shape[1], data.shape[2])
i = 0
for r in range(num_rows):
for c in range(num_cols):
if i < len(data):
if title is not None: plt.suptitle(title)
plt.subplot(num_rows,num_cols, i + 1)
plt.imshow(np.reshape(data[i],subplot_shape), interpolation = 'nearest', cmap = 'copper', vmin = 0, vmax = 1)
plt.axis('off')
if subplot_titles is not None:
plt.title(subplot_titles[i])
i = i + 1
plt.savefig(filename)
def plot_matrix(matrix, columns = None, rows = None, title = None):
# Add a table at the bottom of the axes
print(title)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
plt.axis('off')
the_table = plt.table(cellText = matrix, colLabels = columns, rowLabels = rows, loc = "center")
plt.show()
def print_matrix(matrix, titles = []):
mat_str = ''
# column titles are accepted but not rendered yet
for title in titles:
pass
for i in range(len(matrix)):
for j in range(len(matrix[i])):
mat_str = "{}\t{}".format(mat_str, matrix[i][j])
mat_str += '\n'
print(mat_str)
def plot_dict(to_plot, title = "", size = None):
labels, values = zip(*to_plot.items())
indexes = np.arange(len(labels))
width = 1
plt.suptitle(title)
plt.bar(indexes, values, width)
plt.xticks(indexes + width * 0.5, labels, rotation='vertical')
plt.show()
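# Example (sketch): plot_dict({'a': 3, 'b': 1}, title='counts') draws one bar per
# key, with the keys as x tick labels.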
def image(data,title="",cmap = 'cool', show_colorbar = True, filename= None, color_range = None):
plt.title(title)
vmin = None
vmax = None
if color_range:
vmin, vmax = color_range
plt.imshow(data, interpolation = 'nearest', cmap=cmap,vmin=vmin, vmax=vmax )
if show_colorbar:
plt.colorbar()
if filename is not None:
plt.savefig(filename)
plt.show()
def images(data,title ="", titles = None, cmap = 'cool',filename = None, color_range = None, fig_size = None):
num_cols = 5
num_rows = math.ceil(data.shape[0]/num_cols)
plots_so_far = 0
vmin = None
vmax = None
if color_range:
vmin, vmax = color_range
if fig_size:
plt.figure(figsize=fig_size)
ax = plt.gca()
ax.text(1, 1, title)
for r in range(num_rows):
for c in range(num_cols):
if plots_so_far < len(data):
plt.subplot(num_rows,num_cols, plots_so_far+1)
plt.axis('off')
plt.imshow(data[plots_so_far], interpolation = 'nearest', cmap=cmap,vmin =vmin, vmax = vmax)
else:
break
plots_so_far +=1
# plt.tight_layout()
if filename is not None:
plt.savefig(filename)
plt.show()
"""
Demo of a function to create Hinton diagrams.
Hinton diagrams are useful for visualizing the values of a 2D array (e.g.
a weight matrix): Positive and negative values are represented by white and
black squares, respectively, and the size of each square represents the
magnitude of each value.
Initial idea from David Warde-Farley on the SciPy Cookbook
"""
def hinton(matrix, max_weight=None, ax=None):
"""Draw Hinton diagram for visualizing a weight matrix."""
ax = ax if ax is not None else plt.gca()
if not max_weight:
max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2))
ax.patch.set_facecolor('gray')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (x,y),w in np.ndenumerate(matrix):
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w))
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
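# Usage sketch: hinton(np.random.randn(8, 8)); plt.show() -- white squares mark
# positive values, black squares negative, with area proportional to magnitude.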
| mit |
zuku1985/scikit-learn | sklearn/tests/test_pipeline.py | 5 | 27078 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class NoFit(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class NoInvTransf(NoTrans):
def transform(self, X, y=None):
return X
class Transf(NoInvTransf):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class TransfFitParams(Transf):
def fit(self, X, y, **fit_params):
self.fit_params = fit_params
return self
class Mult(BaseEstimator):
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
predict_proba = predict_log_proba = decision_function = predict
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def fit_predict(self, X, y, should_succeed=False):
self.fit(X, y, should_succeed=should_succeed)
return self.predict(X)
def score(self, X, y=None, sample_weight=None):
if sample_weight is not None:
X = X * sample_weight
return np.sum(X)
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
assert_raises_regex(TypeError,
'Last step of Pipeline should implement fit. '
'.*NoFit.*',
Pipeline, [('clf', NoFit())])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
assert_raises_regex(TypeError,
'All intermediate steps should be transformers'
'.*\\bNoTrans\\b.*',
Pipeline, [('t', NoTrans()), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
# invalid parameters should raise an error message
assert_raise_message(
TypeError,
"fit() got an unexpected keyword argument 'bad'",
pipe.fit, None, None, clf__bad=True
)
def test_pipeline_sample_weight_supported():
# Pipeline should pass sample_weight
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, y=None), 3)
assert_equal(pipe.score(X, y=None, sample_weight=None), 3)
assert_equal(pipe.score(X, sample_weight=np.array([2, 3])), 8)
def test_pipeline_sample_weight_unsupported():
# When sample_weight is None it shouldn't be passed
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', Mult())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, sample_weight=None), 3)
assert_raise_message(
TypeError,
"score() got an unexpected keyword argument 'sample_weight'",
pipe.score, X, sample_weight=np.array([2, 3])
)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# As pipeline doesn't clone estimators on construction,
# it must have its own estimators
scaler_for_pipeline = StandardScaler()
km_for_pipeline = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([
('scaler', scaler_for_pipeline),
('Kmeans', km_for_pipeline)
])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_fit_predict_with_intermediate_fit_params():
# tests that Pipeline passes fit_params to intermediate steps
# when fit_predict is invoked
pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())])
pipe.fit_predict(X=None,
y=None,
transf__should_get_this=True,
clf__should_succeed=True)
assert_true(pipe.named_steps['transf'].fit_params['should_get_this'])
assert_true(pipe.named_steps['clf'].successful)
assert_false('should_succeed' in pipe.named_steps['transf'].fit_params)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
# test error if some elements do not support transform
assert_raises_regex(TypeError,
'All estimators should implement fit and '
'transform.*\\bNoTrans\\b',
FeatureUnion,
[("transform", Transf()), ("no_transform", NoTrans())])
def test_make_union():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transf"))
assert_equal(transformers, (pca, mock))
def test_make_union_kwargs():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock, n_jobs=3)
assert_equal(fu.transformer_list, make_union(pca, mock).transformer_list)
assert_equal(3, fu.n_jobs)
# invalid keyword parameters should raise an error message
assert_raise_message(
TypeError,
'Unknown keyword arguments: "transformer_weights"',
make_union, pca, mock, transformer_weights={'pca': 10, 'Transf': 1}
)
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([('mock', transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([('mock', transf1)])
assert_true(pipeline.named_steps['mock'] is transf1)
# Directly setting attr
pipeline.steps = [('mock2', transf2)]
assert_true('mock' not in pipeline.named_steps)
assert_true(pipeline.named_steps['mock2'] is transf2)
assert_equal([('mock2', transf2)], pipeline.steps)
# Using set_params
pipeline.set_params(steps=[('mock', transf1)])
assert_equal([('mock', transf1)], pipeline.steps)
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert_equal([('mock', transf2)], pipeline.steps)
# With invalid data
pipeline.set_params(steps=[('junk', ())])
assert_raises(TypeError, pipeline.fit, [[1]], [1])
assert_raises(TypeError, pipeline.fit_transform, [[1]], [1])
def test_set_pipeline_step_none():
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=None)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_dict_equal(pipeline.get_params(deep=True),
{'steps': pipeline.steps,
'm2': mult2,
'm3': None,
'last': mult5,
'm2__mult': 2,
'last__mult': 5,
})
pipeline.set_params(m2=None)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = ['predict_proba', 'predict_log_proba',
'decision_function', 'transform', 'score']
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=None)
# mult2 and mult3 are active
exp = 6
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_raise_message(AttributeError,
"'NoneType' object has no attribute 'predict'",
getattr, pipeline, 'predict')
# Check None step at construction time
exp = 2 * 5
pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(None)
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
pipeline = make_pipeline(NoInvTransf(), Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
assert_raise_message(AttributeError,
'Transformer tr1 (type Transf) does not provide '
'get_feature_names', ft.get_feature_names)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_set_feature_union_steps():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
mult5 = Mult(5)
mult5.get_feature_names = lambda: ['x5']
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
# Directly setting attr
ft.transformer_list = [('m5', mult5)]
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['m5__x5'], ft.get_feature_names())
# Using set_params
ft.set_params(transformer_list=[('mock', mult3)])
assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x3'], ft.get_feature_names())
# Using set_params to replace single step
ft.set_params(mock=mult5)
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x5'], ft.get_feature_names())
def test_set_feature_union_step_none():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
X = np.asarray([[1]])
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
ft.set_params(m2=None)
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_equal(['m3__x3'], ft.get_feature_names())
ft.set_params(m3=None)
assert_array_equal([[]], ft.fit(X).transform(X))
assert_array_equal([[]], ft.fit_transform(X))
assert_equal([], ft.get_feature_names())
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[3]], ft.fit(X).transform(X))
def test_step_name_validation():
bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))]
bad_steps2 = [('a', Mult(2)), ('a', Mult(3))]
for cls, param in [(Pipeline, 'steps'),
(FeatureUnion, 'transformer_list')]:
# we validate in construction (despite scikit-learn convention)
bad_steps3 = [('a', Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, "Step names must not contain __: got ['a__q']"),
(bad_steps2, "Names provided are not unique: ['a', 'a']"),
(bad_steps3, "Step names conflict with constructor "
"arguments: ['%s']" % param),
]:
# three ways to make invalid:
# - construction
assert_raise_message(ValueError, message, cls,
**{param: bad_steps})
# - setattr
est = cls(**{param: [('a', Mult(1))]})
setattr(est, param, bad_steps)
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
# - set_params
est = cls(**{param: [('a', Mult(1))]})
est.set_params(**{param: bad_steps})
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
| bsd-3-clause |
daodaoliang/neural-network-animation | matplotlib/spines.py | 11 | 18621 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
from matplotlib import docstring
import matplotlib.transforms as mtransforms
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.cbook as cbook
import numpy as np
import warnings
class Spine(mpatches.Patch):
"""an axis spine -- the line noting the data area boundaries
Spines are the lines connecting the axis tick marks and noting the
boundaries of the data area. They can be placed at arbitrary
positions. See function:`~matplotlib.spines.Spine.set_position`
for more information.
The default position is ``('outward',0)``.
Spines are subclasses of class:`~matplotlib.patches.Patch`, and
inherit much of their behavior.
Spines draw a line or a circle, depending if
function:`~matplotlib.spines.Spine.set_patch_line` or
function:`~matplotlib.spines.Spine.set_patch_circle` has been
called. Line-like is the default.
"""
def __str__(self):
return "Spine"
@docstring.dedent_interpd
def __init__(self, axes, spine_type, path, **kwargs):
"""
- *axes* : the Axes instance containing the spine
- *spine_type* : a string specifying the spine type
- *path* : the path instance used to draw the spine
Valid kwargs are:
%(Patch)s
"""
super(Spine, self).__init__(**kwargs)
self.axes = axes
self.set_figure(self.axes.figure)
self.spine_type = spine_type
self.set_facecolor('none')
self.set_edgecolor(rcParams['axes.edgecolor'])
self.set_linewidth(rcParams['axes.linewidth'])
self.set_capstyle('projecting')
self.axis = None
self.set_zorder(2.5)
self.set_transform(self.axes.transData) # default transform
self._bounds = None # default bounds
self._smart_bounds = False
# Defer initial position determination. (Not much support for
# non-rectangular axes is currently implemented, and this lets
# them pass through the spines machinery without errors.)
self._position = None
assert isinstance(path, matplotlib.path.Path)
self._path = path
# To support drawing both linear and circular spines, this
# class implements Patch behavior two ways. If
# self._patch_type == 'line', behave like a mpatches.PathPatch
# instance. If self._patch_type == 'circle', behave like a
# mpatches.Ellipse instance.
self._patch_type = 'line'
# Behavior copied from mpatches.Ellipse:
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = mtransforms.IdentityTransform()
def set_smart_bounds(self, value):
"""set the spine and associated axis to have smart bounds"""
self._smart_bounds = value
# also set the axis if possible
if self.spine_type in ('left', 'right'):
self.axes.yaxis.set_smart_bounds(value)
elif self.spine_type in ('top', 'bottom'):
self.axes.xaxis.set_smart_bounds(value)
def get_smart_bounds(self):
"""get whether the spine has smart bounds"""
return self._smart_bounds
def set_patch_circle(self, center, radius):
"""set the spine to be circular"""
self._patch_type = 'circle'
self._center = center
self._width = radius * 2
self._height = radius * 2
self._angle = 0
# circle drawn on axes transform
self.set_transform(self.axes.transAxes)
def set_patch_line(self):
"""set the spine to be linear"""
self._patch_type = 'line'
# Behavior copied from mpatches.Ellipse:
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
assert self._patch_type == 'circle'
center = (self.convert_xunits(self._center[0]),
self.convert_yunits(self._center[1]))
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
self._patch_transform = mtransforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self._angle) \
.translate(*center)
def get_patch_transform(self):
if self._patch_type == 'circle':
self._recompute_transform()
return self._patch_transform
else:
return super(Spine, self).get_patch_transform()
def get_path(self):
return self._path
def _ensure_position_is_set(self):
if self._position is None:
# default position
self._position = ('outward', 0.0) # in points
self.set_position(self._position)
def register_axis(self, axis):
"""register an axis
An axis should be registered with its corresponding spine from
the Axes instance. This allows the spine to clear any axis
properties when needed.
"""
self.axis = axis
if self.axis is not None:
self.axis.cla()
def cla(self):
"""Clear the current spine"""
self._position = None # clear position
if self.axis is not None:
self.axis.cla()
def is_frame_like(self):
"""return True if directly on axes frame
This is useful for determining if a spine is the edge of an
old style MPL plot. If so, this function will return True.
"""
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
assert len(position) == 2, "position should be 2-tuple"
position_type, amount = position
if position_type == 'outward' and amount == 0:
return True
else:
return False
def _adjust_location(self):
"""automatically set spine bounds to the view interval"""
if self.spine_type == 'circle':
return
if self._bounds is None:
if self.spine_type in ('left', 'right'):
low, high = self.axes.viewLim.intervaly
elif self.spine_type in ('top', 'bottom'):
low, high = self.axes.viewLim.intervalx
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if self._smart_bounds:
# attempt to set bounds in sophisticated way
if low > high:
# handle inverted limits
low, high = high, low
viewlim_low = low
viewlim_high = high
del low, high
if self.spine_type in ('left', 'right'):
datalim_low, datalim_high = self.axes.dataLim.intervaly
ticks = self.axes.get_yticks()
elif self.spine_type in ('top', 'bottom'):
datalim_low, datalim_high = self.axes.dataLim.intervalx
ticks = self.axes.get_xticks()
# handle inverted limits
ticks = list(ticks)
ticks.sort()
ticks = np.array(ticks)
if datalim_low > datalim_high:
datalim_low, datalim_high = datalim_high, datalim_low
if datalim_low < viewlim_low:
# Data extends past view. Clip line to view.
low = viewlim_low
else:
# Data ends before view ends.
cond = (ticks <= datalim_low) & (ticks >= viewlim_low)
tickvals = ticks[cond]
if len(tickvals):
# A tick is less than or equal to lowest data point.
low = tickvals[-1]
else:
# No tick is available
low = datalim_low
low = max(low, viewlim_low)
if datalim_high > viewlim_high:
# Data extends past view. Clip line to view.
high = viewlim_high
else:
# Data ends before view ends.
cond = (ticks >= datalim_high) & (ticks <= viewlim_high)
tickvals = ticks[cond]
if len(tickvals):
# A tick is greater than or equal to highest data
# point.
high = tickvals[0]
else:
# No tick is available
high = datalim_high
high = min(high, viewlim_high)
else:
low, high = self._bounds
v1 = self._path.vertices
assert v1.shape == (2, 2), 'unexpected vertices shape'
if self.spine_type in ['left', 'right']:
v1[0, 1] = low
v1[1, 1] = high
elif self.spine_type in ['bottom', 'top']:
v1[0, 0] = low
v1[1, 0] = high
else:
raise ValueError('unable to set bounds for spine "%s"' %
self.spine_type)
@allow_rasterization
def draw(self, renderer):
self._adjust_location()
return super(Spine, self).draw(renderer)
def _calc_offset_transform(self):
"""calculate the offset transform performed by the spine"""
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
assert len(position) == 2, "position should be 2-tuple"
position_type, amount = position
assert position_type in ('axes', 'outward', 'data')
if position_type == 'outward':
if amount == 0:
# short circuit commonest case
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif self.spine_type in ['left', 'right', 'top', 'bottom']:
offset_vec = {'left': (-1, 0),
'right': (1, 0),
'bottom': (0, -1),
'top': (0, 1),
}[self.spine_type]
# calculate x and y offset in dots
offset_x = amount * offset_vec[0] / 72.0
offset_y = amount * offset_vec[1] / 72.0
self._spine_transform = ('post',
mtransforms.ScaledTranslation(
offset_x,
offset_y,
self.figure.dpi_scale_trans))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'axes':
if self.spine_type in ('left', 'right'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
# keep y unchanged, fix x at
# amount
0, 0, 0, 1, amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
# keep x unchanged, fix y at
# amount
1, 0, 0, 0, 0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'data':
if self.spine_type in ('right', 'top'):
# The right and top spines have a default position of 1 in
# axes coordinates. When specifying the position in data
# coordinates, we need to calculate the position relative to 0.
amount -= 1
if self.spine_type in ('left', 'right'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
def set_position(self, position):
"""set the position of the spine
Spine position is specified by a 2 tuple of (position type,
amount). The position types are:
* 'outward' : place the spine out from the data area by the
specified number of points. (Negative values specify placing the
spine inward.)
* 'axes' : place the spine at the specified Axes coordinate (from
0.0-1.0).
* 'data' : place the spine at the specified data coordinate.
Additionally, shorthand notations define special positions:
* 'center' -> ('axes',0.5)
* 'zero' -> ('data', 0.0)
"""
if position in ('center', 'zero'):
# special positions
pass
else:
assert len(position) == 2, "position should be 'center' or 2-tuple"
assert position[0] in ['outward', 'axes', 'data']
self._position = position
self._calc_offset_transform()
self.set_transform(self.get_spine_transform())
if self.axis is not None:
self.axis.reset_ticks()
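# Example of typical use from user code (sketch):
#     ax.spines['left'].set_position(('outward', 10))  # 10 points outward
#     ax.spines['bottom'].set_position('zero')         # pin at y == 0 in data coords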
def get_position(self):
"""get the spine position"""
self._ensure_position_is_set()
return self._position
def get_spine_transform(self):
"""get the spine transform"""
self._ensure_position_is_set()
what, how = self._spine_transform
if what == 'data':
# special case data based spine locations
data_xform = self.axes.transScale + \
(how + self.axes.transLimits + self.axes.transAxes)
if self.spine_type in ['left', 'right']:
result = mtransforms.blended_transform_factory(
data_xform, self.axes.transData)
elif self.spine_type in ['top', 'bottom']:
result = mtransforms.blended_transform_factory(
self.axes.transData, data_xform)
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
return result
if self.spine_type in ['left', 'right']:
base_transform = self.axes.get_yaxis_transform(which='grid')
elif self.spine_type in ['top', 'bottom']:
base_transform = self.axes.get_xaxis_transform(which='grid')
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if what == 'identity':
return base_transform
elif what == 'post':
return base_transform + how
elif what == 'pre':
return how + base_transform
else:
raise ValueError("unknown spine_transform type: %s" % what)
def set_bounds(self, low, high):
"""Set the bounds of the spine."""
if self.spine_type == 'circle':
raise ValueError(
'set_bounds() method incompatible with circular spines')
self._bounds = (low, high)
def get_bounds(self):
"""Get the bounds of the spine."""
return self._bounds
@classmethod
def linear_spine(cls, axes, spine_type, **kwargs):
"""
(classmethod) Returns a linear :class:`Spine`.
"""
# all values of 13 get replaced upon call to set_bounds()
if spine_type == 'left':
path = mpath.Path([(0.0, 13), (0.0, 13)])
elif spine_type == 'right':
path = mpath.Path([(1.0, 13), (1.0, 13)])
elif spine_type == 'bottom':
path = mpath.Path([(13, 0.0), (13, 0.0)])
elif spine_type == 'top':
path = mpath.Path([(13, 1.0), (13, 1.0)])
else:
raise ValueError('unable to make path for spine "%s"' % spine_type)
result = cls(axes, spine_type, path, **kwargs)
return result
@classmethod
def circular_spine(cls, axes, center, radius, **kwargs):
"""
(classmethod) Returns a circular :class:`Spine`.
"""
path = mpath.Path.unit_circle()
spine_type = 'circle'
result = cls(axes, spine_type, path, **kwargs)
result.set_patch_circle(center, radius)
return result
def set_color(self, c):
"""
Set the edgecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
# The facecolor of a spine is always 'none' by default -- let
# the user change it manually if desired.
self.set_edgecolor(c)
| mit |
c-benko/HHG_phasematching_fsEC | test/pvi_scan.py | 2 | 1441 | import sys, os
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(here, '../src')))
from phasematching import *
import matplotlib
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator, LinearLocator
num = 20
pscan = np.linspace(0.001, 1, num)
iscan = np.linspace(5, 220, num)
harm = np.array((np.zeros((num, num))))
Lcoh = np.array((np.zeros((num, num))))
ion = np.array((np.zeros((num, num))))
Labs = np.array((np.zeros((num, num))))
Lmed = np.array((np.zeros((num, num))))
phiSS = np.array((np.zeros((num, num))))
dphi = np.array((np.zeros((num, num))))
freq = np.array((np.zeros((num, num))))
pred = np.array((np.zeros((num, num))))
buildup = np.array((np.zeros((num, num))))
for i in range(num):
for j in range(num):
sim = phase_matching('Xe', 13, iscan[i]*.8 , 60e-15, 90e-6, 1070e-9, pscan[j], 0.1e-3, 200, 0, .015,0, 'on')
harm[i,j], Lcoh[i,j], ion[i,j], Labs[i,j], Lmed[i,j], phiSS[i,j], dphi[i,j], freq[i,j], pred[i,j], buildup[i,j] = sim.int_harmonic_yield()
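# Each num x num grid filled above holds one phase-matching observable
# (per the variable names: harmonic yield, coherence length, ionization
# fraction, ...) evaluated over the pressure scan (pscan) and intensity
# scan (iscan).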
f, ax = plt.subplots()
image = ax.imshow(harm, extent = [min(pscan),max(pscan),min(iscan), max(iscan) ],
aspect='auto', origin='lower',interpolation = 'bicubic')#,vmin=0, vmax=9e28)
im = plt.colorbar(image, ax=ax)
im.set_label('Some Units')
# ax.set_ylim(min(pscan),max(pscan))
# ax.set_xlim(min(iscan), max(iscan))
plt.show()
| mit |
caneGuy/spark | python/pyspark/ml/clustering.py | 1 | 58222 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
@property
@since("2.4.0")
def numIter(self):
"""
Number of iterations.
"""
return self._call_java("numIter")
@inherit_doc
class _GaussianMixtureParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasProbabilityCol, HasTol, HasAggregationDepth):
"""
Params for :py:class:`GaussianMixture` and :py:class:`GaussianMixtureModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureModel(JavaModel, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@since("3.0.0")
def predictProbability(self, value):
"""
Predict probability for the given features.
"""
return self._call_java("predictProbability", value)
@inherit_doc
class GaussianMixture(JavaEstimator, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
    specifying each distribution's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> gm.getMaxIter()
100
>>> gm.setMaxIter(10)
GaussianMixture...
>>> gm.getMaxIter()
10
>>> model = gm.fit(df)
>>> model.getAggregationDepth()
2
>>> model.getFeaturesCol()
'features'
>>> model.setPredictionCol("newPrediction")
GaussianMixture...
>>> model.predict(df.head().features)
2
>>> model.predictProbability(df.head().features)
DenseVector([0.0, 0.4736, 0.5264])
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> summary.logLikelihood
8.14636...
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[4].newPrediction == rows[5].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100, aggregationDepth=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
class GaussianMixtureSummary(ClusteringSummary):
"""
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
Summary of KMeans.
.. versionadded:: 2.1.0
"""
@property
@since("2.4.0")
def trainingCost(self):
"""
K-means cost (sum of squared distances to the nearest centroid for all points in the
training dataset). This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
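# Cross-check sketch for trainingCost (added for documentation; `model` is an assumed
# fitted KMeansModel and `df` its training DataFrame with default column names). It
# recomputes the within-cluster sum of squared distances by hand; the result should
# agree with summary.trainingCost up to floating-point error.
def _kmeans_training_cost_check(model, df):
    import numpy as np   # local import, illustrative only
    centers = model.clusterCenters()
    rows = model.transform(df).select("features", "prediction").collect()
    cost = sum(float(np.sum((row.features.toArray() - centers[row.prediction]) ** 2))
               for row in rows)
    return cost, model.summary.trainingCost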
@inherit_doc
class _KMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, HasTol,
HasDistanceMeasure):
"""
Params for :py:class:`KMeans` and :py:class:`KMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
class KMeansModel(JavaModel, _KMeansParams, GeneralJavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(super(KMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@inherit_doc
class KMeans(JavaEstimator, _KMeansParams, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2)
>>> kmeans.setSeed(1)
KMeans...
>>> kmeans.setMaxIter(10)
KMeans...
>>> kmeans.getMaxIter()
10
>>> kmeans.clear(kmeans.maxIter)
>>> model = kmeans.fit(df)
>>> model.getDistanceMeasure()
'euclidean'
>>> model.setPredictionCol("newPrediction")
KMeans...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
2.0
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("1.5.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.5.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("1.5.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.5.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@inherit_doc
class _BisectingKMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasDistanceMeasure):
"""
Params for :py:class:`BisectingKMeans` and :py:class:`BisectingKMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
class BisectingKMeansModel(JavaModel, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
        .. note:: Deprecated in 3.0.0. It will be removed in future versions. Use
ClusteringEvaluator instead. You can also get the cost on the training dataset in the
summary.
"""
warnings.warn("Deprecated in 3.0.0. It will be removed in future versions. Use "
"ClusteringEvaluator instead. You can also get the cost on the training "
"dataset in the summary.", DeprecationWarning)
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
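# Replacement sketch for the deprecated BisectingKMeansModel.computeCost above (added
# for documentation; `model` is an assumed fitted BisectingKMeansModel and `df` a
# DataFrame with default column names). ClusteringEvaluator reports a silhouette score,
# which is a different metric, while summary.trainingCost gives the squared-distance
# cost on the training data.
def _bisecting_kmeans_cost_alternatives(model, df):
    from pyspark.ml.evaluation import ClusteringEvaluator   # local import, illustrative only
    silhouette = ClusteringEvaluator().evaluate(model.transform(df))
    return silhouette, model.summary.trainingCost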
@inherit_doc
class BisectingKMeans(JavaEstimator, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> bkm.setMaxIter(10)
BisectingKMeans...
>>> bkm.getMaxIter()
10
>>> bkm.clear(bkm.maxIter)
>>> bkm.setSeed(1)
BisectingKMeans...
>>> bkm.getSeed()
1
>>> bkm.clear(bkm.seed)
>>> model = bkm.fit(df)
>>> model.getMaxIter()
20
>>> model.setPredictionCol("newPrediction")
BisectingKMeans...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.0
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
2.000...
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("3.0.0")
def trainingCost(self):
"""
Sum of squared distances to the nearest centroid for all points in the training dataset.
This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
@inherit_doc
class _LDAParams(HasMaxIter, HasFeaturesCol, HasSeed, HasCheckpointInterval):
"""
Params for :py:class:`LDA` and :py:class:`LDAModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
    topicConcentration = Param(Params._dummy(), "topicConcentration",
                               "Concentration parameter (commonly named \"beta\" or \"eta\") for "
                               "the prior placed on topics' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class LDAModel(JavaModel, _LDAParams):
"""
Latent Dirichlet Allocation (LDA) model.
    This abstraction permits different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
        :return: List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, _LDAParams, JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> lda.setMaxIter(10)
LDA...
>>> lda.getMaxIter()
10
>>> lda.clear(lda.maxIter)
>>> model = lda.fit(df)
>>> model.getTopicDistributionCol()
'topicDistribution'
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
        Currently only supports 'em' and 'online'.
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
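# Preprocessing sketch for LDA input (added for documentation; the `corpus` DataFrame
# and its "text" column are assumptions). As noted in the LDA docstring, Tokenizer and
# CountVectorizer can turn raw text into the term-count vectors that LDA expects.
def _lda_text_pipeline_example(corpus):
    from pyspark.ml.feature import Tokenizer, CountVectorizer   # local import, illustrative only
    tokens = Tokenizer(inputCol="text", outputCol="words").transform(corpus)
    cv_model = CountVectorizer(inputCol="words", outputCol="features").fit(tokens)
    counts = cv_model.transform(tokens)
    return LDA(k=5, maxIter=10).fit(counts)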
@inherit_doc
class _PowerIterationClusteringParams(HasMaxIter, HasWeightCol):
"""
Params for :py:class:`PowerIterationClustering`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@inherit_doc
class PowerIterationClustering(_PowerIterationClusteringParams, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
`Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_. From the
abstract: PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
    This class is not yet an Estimator/Transformer; use the :py:func:`assignClusters` method
to run the PowerIterationClustering algorithm.
.. seealso:: `Wikipedia on Spectral clustering
<http://en.wikipedia.org/wiki/Spectral_clustering>`_
>>> data = [(1, 0, 0.5),
... (2, 0, 0.5), (2, 1, 0.7),
... (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
... (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
... (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight").repartition(1)
>>> pic = PowerIterationClustering(k=2, weightCol="weight")
>>> pic.setMaxIter(40)
PowerIterationClustering...
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |0 |
|1 |0 |
|2 |0 |
|3 |0 |
|4 |0 |
|5 |1 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
.. versionadded:: 2.4.0
"""
@keyword_only
def __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.4.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.4.0")
def assignClusters(self, dataset):
"""
Run the PIC algorithm and returns a cluster assignment for each input vertex.
:param dataset:
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
          the dst column value is j, and the weight column value is the similarity s_ij,
          which must be nonnegative. This is a symmetric matrix and hence
          s_ij = s_ji. For any (i, j) with nonzero similarity, there should be
          either (i, j, s_ij) or (j, i, s_ji) in the input. Rows with i = j are
          ignored, because we assume s_ij = 0.0.
:return:
A dataset that contains columns of vertex id and the corresponding cluster for
the id. The schema of it will be:
- id: Long
- cluster: Int
.. versionadded:: 2.4.0
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
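# Input-format sketch for PowerIterationClustering.assignClusters (added for
# documentation; the "weight" column name and the toy edge list are assumptions).
# Only one orientation of each symmetric pair (i, j) needs to be present, and rows
# with i == j are ignored.
def _pic_assign_clusters_example(spark):
    """`spark` is an active SparkSession; returns the cluster assignment DataFrame."""
    edges = [(0, 1, 0.9), (0, 2, 0.1), (1, 2, 0.8)]   # s_ij == s_ji is implied
    edge_df = spark.createDataFrame(edges, ["src", "dst", "weight"])
    pic = PowerIterationClustering(k=2, weightCol="weight")
    return pic.assignClusters(edge_df)   # columns: id (bigint), cluster (int)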
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
        # NumPy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| apache-2.0 |
StefanoD/ComputerVision | Uebung/Uebung1/src/libcore.py | 1 | 5979 | from skimage.data import imread
import numpy as np
from enum import Enum
import math
import matplotlib.pyplot as plt
class Img:
@staticmethod
def load_image(path, as_grey = False, to_float = True):
# Load image
image = imread(path, as_grey)
if to_float:
# Convert to floating point matrix
image = image.astype(np.float32)
return image
@staticmethod
def get_2d_rotation_matrix(rad):
rotation_matrix = np.zeros((2, 2))
rotation_matrix[0, 0] = math.cos(rad)
rotation_matrix[0, 1] = -math.sin(rad)
rotation_matrix[1, 0] = math.sin(rad)
rotation_matrix[1, 1] = math.cos(rad)
return rotation_matrix
@staticmethod
def get_2d_scale_matrix(scale):
scale_matrix = np.zeros((2, 2))
scale_matrix[0, 0] = scale
scale_matrix[1, 1] = scale
return scale_matrix
@staticmethod
def get_2d_x_scale_matrix(scale):
x_scale_matrix = np.zeros((2, 2))
x_scale_matrix[0, 0] = scale
x_scale_matrix[1, 1] = 1
return x_scale_matrix
@staticmethod
def get_2d_x_y_scale_matrix(x_scale, y_scale):
x_scale_matrix = np.zeros((2, 2))
x_scale_matrix[0, 0] = x_scale
x_scale_matrix[1, 1] = y_scale
return x_scale_matrix
@staticmethod
    def get_x_3d_rotation_matrix(degrees):
        """Rotation about the x axis (angle given in degrees, returns a 3x3 matrix)"""
        rad = math.radians(degrees)
        rotation_matrix = np.zeros((3, 3))
        rotation_matrix[0, 0] = 1
        rotation_matrix[1, 1] = math.cos(rad)
        rotation_matrix[1, 2] = -math.sin(rad)
        rotation_matrix[2, 1] = math.sin(rad)
        rotation_matrix[2, 2] = math.cos(rad)
        return rotation_matrix
@staticmethod
def get_scale_diagonal_matrix(scale_diag):
scale_diagonal_matrix = np.zeros((2, 2))
scale_diagonal_matrix[0, 0] = 1
scale_diagonal_matrix[0, 1] = scale_diag
scale_diagonal_matrix[1, 0] = 1
scale_diagonal_matrix[1, 1] = 1
return scale_diagonal_matrix
@staticmethod
def get_scale_orthogonal_matrix(scale_orthogonal):
scale_orthogonal_matrix = np.zeros((2, 2))
scale_orthogonal_matrix[0, 0] = 1
scale_orthogonal_matrix[0, 1] = 1
scale_orthogonal_matrix[1, 0] = scale_orthogonal
scale_orthogonal_matrix[1, 1] = 1
return scale_orthogonal_matrix
class Transform:
@staticmethod
def translate(matrix, trans_vector):
return matrix + trans_vector
class RestructuringMethod(Enum):
NearestNeighbor = 1
BilinearInterpolation = 2
@staticmethod
def affine_transform(image,
transform_matrix,
translation_vector,
restructuring_method=BilinearInterpolation):
from numpy.linalg import inv
new_x_size = int(image.shape[0] * 1.5)
new_y_size = int(image.shape[1] * 1.5)
new_image = np.zeros((new_x_size, new_y_size, 3))
# Get the inverse matrix for indirect restructuring
trans_inv = inv(transform_matrix)
for x in range(new_x_size):
for y in range(new_y_size):
new_coordinates = np.array([x, y])
# First reverse translation
                new_coordinates = new_coordinates - translation_vector + np.array([0, -image.shape[1] / 2])
# Reverse transformation
new_coordinates = np.dot(new_coordinates, trans_inv)
new_x = new_coordinates[0]
new_y = new_coordinates[1]
if restructuring_method == RestructuringMethod.NearestNeighbor:
new_x, new_y = RestructuringMethod.nearest_neighboor(new_x, new_y)
if new_x > 0 and new_y > 0 and new_x < image.shape[0] and new_y < image.shape[1]:
if restructuring_method == RestructuringMethod.BilinearInterpolation:
new_image[x, y, 0] = RestructuringMethod.bilinear_interpolation(image[:, :, 0], new_x, new_y)
new_image[x, y, 1] = RestructuringMethod.bilinear_interpolation(image[:, :, 1], new_x, new_y)
new_image[x, y, 2] = RestructuringMethod.bilinear_interpolation(image[:, :, 2], new_x, new_y)
else:
new_image[x, y, 0] = image[new_x, new_y, 0]
new_image[x, y, 1] = image[new_x, new_y, 1]
new_image[x, y, 2] = image[new_x, new_y, 2]
# back casting to uint8
return new_image.astype(np.uint8)
@staticmethod
def bilinear_interpolation(image, x, y):
x_left = int(x)
x_right = int(x + 1)
y_upper = int(y)
y_lower = int(y + 1)
# Because we added 1 on x and y, we could possibly be over
# the range of the image
image_x_max_index = image.shape[0] - 1
image_y_max_index = image.shape[1] - 1
if (x_right > image_x_max_index or y_lower > image_y_max_index):
            return image[int(x), int(y)]  # border case: fall back to the truncated integer position
# calculate areas
a1 = (x - x_left) * (y - y_upper)
a2 = (x_right - x) * (y - y_upper)
a3 = (x - x_left) * (y_lower - y)
a4 = (x_right - x) * (y_lower - y)
grey_value_left_upper = image[x_left, y_upper]
grey_value_right_upper = image[x_right, y_upper]
grey_value_left_lower = image[x_left, y_lower]
grey_value_right_lower = image[x_right, y_lower]
bilinear_interpolated_gray_value = grey_value_left_upper * a4 + grey_value_right_upper * a3 + \
grey_value_left_lower * a2 + grey_value_right_lower * a1
return bilinear_interpolated_gray_value
@staticmethod
def nearest_neighboor(x, y):
# round coordinates
new_x = int(x + 0.5)
new_y = int(y + 0.5)
return new_x, new_y
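# Usage sketch (added for documentation; the file path, rotation angle and offset are
# illustrative assumptions): load an RGB image, rotate it by 30 degrees with bilinear
# resampling via the indirect restructuring above, and display the result.
def _affine_transform_example(path="example.png"):
    image = Img.load_image(path, to_float=False)
    rotation = Img.get_2d_rotation_matrix(math.radians(30))
    shift = np.array([20, 20])
    rotated = RestructuringMethod.affine_transform(
        image, rotation, shift,
        restructuring_method=RestructuringMethod.BilinearInterpolation)
    plt.imshow(rotated)
    plt.show()
    return rotated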
| apache-2.0 |
ChristosChristofidis/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_iris_h2o_vs_sciKmeans.py | 1 | 1224 | import sys
sys.path.insert(1, "../../../")
import h2o
import numpy as np
from sklearn.cluster import KMeans
def iris_h2o_vs_sciKmeans(ip,port):
# Connect to a pre-existing cluster
h2o.init(ip,port) # connect to localhost:54321
iris_h2o = h2o.import_frame(path=h2o.locate("smalldata/iris/iris.csv"))
iris_sci = np.genfromtxt(h2o.locate("smalldata/iris/iris.csv"), delimiter=',')
iris_sci = iris_sci[:,0:4]
s =[[4.9,3.0,1.4,0.2],
[5.6,2.5,3.9,1.1],
[6.5,3.0,5.2,2.0]]
start = h2o.H2OFrame(s)
h2o_km = h2o.kmeans(x=iris_h2o[0:4], k=3, user_points=start, standardize=False)
sci_km = KMeans(n_clusters=3, init=np.asarray(s), n_init=1)
sci_km.fit(iris_sci)
# Log.info("Cluster centers from H2O:")
print "Cluster centers from H2O:"
h2o_centers = h2o_km.centers()
print h2o_centers
# Log.info("Cluster centers from scikit:")
print "Cluster centers from scikit:"
sci_centers = sci_km.cluster_centers_.tolist()
print sci_centers
for hcenter, scenter in zip(h2o_centers, sci_centers):
for hpoint, spoint in zip(hcenter,scenter):
assert (hpoint- spoint) < 1e-10, "expected centers to be the same"
if __name__ == "__main__":
h2o.run_test(sys.argv, iris_h2o_vs_sciKmeans)
| apache-2.0 |
AlessandroCorsi/fibermodes | scripts/cutoff.py | 2 | 8962 | # This file is part of FiberModes.
#
# FiberModes is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FiberModes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FiberModes. If not, see <http://www.gnu.org/licenses/>.
"""This file contains the code to generate plots used in the JLT paper
about cutoff of three-layer step-index fibers.
"""
from fibermodes import FiberFactory, Simulator, Mode, ModeFamily
from itertools import zip_longest
import numpy
from matplotlib import pyplot
from matplotlib.patches import Rectangle
import seaborn as sns
from math import sqrt
FIBERS = [
("Fiber (a)", [4e-6, 6e-6], [1.47, 1.43, 1.44]),
("Fiber (b)", [4e-6, 6e-6], [1.47, 1.45, 1.44]),
("Fiber (c)", [4e-6, 6e-6], [1.43, 1.47, 1.44]),
("Fiber (d)", [4e-6, 6e-6], [1.45, 1.47, 1.44]),
("Fiber (e)", [4e-6, 6e-6], [1.44, 1.47, 1.44]),
]
VLIM = (2.5, 7.0)
MODE_COLORS = {
"HE(1,1)": sns.xkcd_rgb['mid blue'],
"LP(0,1)": sns.xkcd_rgb['mid blue'],
"TE(0,1)": sns.xkcd_rgb['orange'],
"HE(2,1)": sns.xkcd_rgb['bright sky blue'],
"LP(1,1)": sns.xkcd_rgb['bright sky blue'],
"TM(0,1)": sns.xkcd_rgb['red'],
"EH(1,1)": sns.xkcd_rgb['darkish green'],
"HE(3,1)": sns.xkcd_rgb['purplish blue'],
"LP(2,1)": sns.xkcd_rgb['purplish blue'],
"EH(2,1)": sns.xkcd_rgb['bluish green'],
"HE(4,1)": sns.xkcd_rgb['fuchsia'],
"LP(3,1)": sns.xkcd_rgb['fuchsia'],
"EH(3,1)": sns.xkcd_rgb['leafy green'],
"HE(5,1)": sns.xkcd_rgb['neon pink'],
"LP(4,1)": sns.xkcd_rgb['neon pink'],
"EH(4,1)": sns.xkcd_rgb['bright olive'],
"HE(6,1)": sns.xkcd_rgb['rosy pink'],
"LP(5,1)": sns.xkcd_rgb['rosy pink'],
"EH(5,1)": sns.xkcd_rgb['darkish green'],
"HE(7,1)": sns.xkcd_rgb['purplish blue'],
"LP(6,1)": sns.xkcd_rgb['purplish blue'],
"EH(6,1)": sns.xkcd_rgb['bluish green'],
"HE(8,1)": sns.xkcd_rgb['fuchsia'],
"LP(7,1)": sns.xkcd_rgb['fuchsia'],
"EH(7,1)": sns.xkcd_rgb['leafy green'],
"HE(9,1)": sns.xkcd_rgb['neon pink'],
"LP(8,1)": sns.xkcd_rgb['neon pink'],
"EH(8,1)": sns.xkcd_rgb['bright olive'],
"HE(10,1)": sns.xkcd_rgb['rosy pink'],
"LP(9,1)": sns.xkcd_rgb['rosy pink'],
"HE(1,2)": sns.xkcd_rgb['deep sky blue'],
"LP(0,2)": sns.xkcd_rgb['deep sky blue'],
"TE(0,2)": sns.xkcd_rgb['browny orange'],
"HE(2,2)": sns.xkcd_rgb['true blue'],
"LP(1,2)": sns.xkcd_rgb['true blue'],
"TM(0,2)": sns.xkcd_rgb['blood red'],
"EH(1,2)": sns.xkcd_rgb['evergreen'],
"HE(3,2)": sns.xkcd_rgb['bright violet'],
"LP(2,2)": sns.xkcd_rgb['bright violet'],
"LP(0,3)": sns.xkcd_rgb['turquoise blue'],
}
FIRSTMODES = (
Mode(ModeFamily.TE, 0, 1),
Mode(ModeFamily.HE, 2, 1),
Mode(ModeFamily.TM, 0, 1),
Mode(ModeFamily.EH, 1, 1),
Mode(ModeFamily.HE, 3, 1),
Mode(ModeFamily.HE, 1, 2),
)
def plot_b_vs_V(vectorial=True, scalar=False):
nf = len(FIBERS)
fig, axes = pyplot.subplots(nf, 1, sharex=False, sharey=False,
subplot_kw={'xlim': VLIM, 'ylim': (0, 0.3)},
figsize=(6, 9))
sns.despine(fig)
lines = {}
for i, (name, r, n) in enumerate(FIBERS):
axes[i].set_title(name)
f = FiberFactory()
for (r_, n_) in zip_longest(r, n):
f.addLayer(radius=r_, index=n_)
fiber = f[0]
V = numpy.linspace(*VLIM)
wl = [fiber.toWl(v) for v in V[::-1]]
sim = Simulator(f, wl, vectorial=vectorial, scalar=scalar, delta=1e-5)
co = next(sim.cutoff())
b = next(sim.b())
assert len(b) == len(wl)
for mode, cutoff in co[0].items():
if cutoff == 0:
continue # skip HE(1,1) / LP(0,1)
color = MODE_COLORS[str(mode)]
axes[i].axvline(cutoff, ls=':', color=color)
b_ = numpy.empty(len(wl))
for j, b__ in enumerate(b):
b_[j] = b__.get(mode, float("nan"))
lines[mode], = axes[i].plot(V[::-1], b_, color=color)
if i == 1 and vectorial is True: # fiber b
r = Rectangle((4.6, 0), 0.8, 0.04, alpha=.3, facecolor='grey')
axes[i].add_patch(r)
handles = [lines[k] for k in sorted(lines)]
labels = [str(k) for k in sorted(lines)]
leg = fig.legend(handles, labels, loc='upper left',
bbox_to_anchor=(0.18, 1), frameon=True)
frame = leg.get_frame()
frame.set_linewidth(0)
fig.text(0.04, 0.5, "Normalized propagation constant ($b$)",
rotation='vertical', ha='center', va='center')
axes[-1].set_xlabel("Normalized frequency ($V_0$)")
fig.tight_layout(rect=(0.04, 0, 1, 1))
def plot_zoom(fiber, vlim=(4.6, 5.4), blim=(0, 0.04)):
fig = pyplot.figure(figsize=(6, 5))
ax = fig.add_subplot(111, xlim=vlim, ylim=blim)
sns.despine(fig)
name, r, n = fiber
ax.set_title(name)
f = FiberFactory()
for (r_, n_) in zip_longest(r, n):
f.addLayer(radius=r_, index=n_)
fiber = f[0]
V = numpy.linspace(*vlim)
wl = [fiber.toWl(v) for v in V[::-1]]
sim = Simulator(f, wl, delta=1e-7)
co = next(sim.cutoff())
b = next(sim.b())
for mode, cutoff in co[0].items():
if cutoff == 0:
continue # skip HE(1,1) / LP(0,1)
color = MODE_COLORS[str(mode)]
ax.axvline(cutoff, ls=':', color=color)
b_ = numpy.empty(len(wl))
for j, b__ in enumerate(b):
b_[j] = b__.get(mode, float("nan"))
ax.plot(V[::-1], b_, color=color,
label=str(mode) if mode.nu in (1, 3) else None)
ax.set_ylabel("Normalized propagation constant ($b$)")
ax.set_xlabel("Normalized frequency ($V_0$)")
ax.legend(loc='best')
fig.tight_layout()
def plot_var(n1, n2, n3, vlim, modes=None, mmax=None, numax=None, colors=None):
f = FiberFactory()
f.addLayer(radius=4e-6, index=n1)
f.addLayer(radius=6e-6, index=n2)
f.addLayer(index=n3)
wl = 800e-9
if modes is not None:
numax = max(m.nu for m in modes)
mmax = max(m.m for m in modes)
sim = Simulator(f, wl, delta=1e-5, numax=numax, mmax=mmax)
co = list(sim.cutoff())
if modes is None:
modes = set()
for m_ in sim.modes():
modes |= m_[0]
fig = pyplot.figure(figsize=(6, 5))
ax = fig.add_subplot(111, xlim=vlim, ylim=(1.2, 1.8))
sns.despine(fig)
if hasattr(n1, '__iter__'):
yl = 'Index of center layer ($n_1$)'
n = n1
on = n2
var = 1
else:
yl = 'Index of middle layer ($n_2$)'
n = n2
on = n1
var = 2
na = sqrt(on**2 - n3*n3)
lines = {}
for mode in modes:
co_ = numpy.empty(len(n))
for i, co__ in enumerate(co):
co_[i] = co__[0].get(mode, float("nan"))
nm = max(n[i], on)
if n[i] == n3 and var == 2:
co_[i] *= 6 / 4
else:
co_[i] *= na / sqrt(nm*nm - n3*n3)
if colors:
color = colors[mode.m][mode.nu]
else:
color = MODE_COLORS[str(mode)]
lines[mode], = ax.plot(co_, n, color=color, label=str(mode))
ax.axhline(1.4, ls='--', color='k')
ax.axhline(1.6, ls='--', color='k')
ax.axhspan(1.2, 1.4, color='grey', alpha=0.6)
ax.axhspan(1.4, 1.6, color='grey', alpha=0.4)
ax.axhspan(1.6, 1.8, color='grey', alpha=0.2)
ax.set_ylabel(yl)
ax.set_xlabel("Normalized frequency ($V_0$)")
if colors:
m = [Mode("TE", 0, 1), Mode("HE", 1, 2), Mode("HE", 1, 3)]
handles = [lines[m_] for m_ in m]
labels = ["$m=1$", "$m=2$", "$m=3$"]
ax.legend(handles, labels, loc='best')
else:
ax.legend(loc='best')
fig.tight_layout()
if __name__ == '__main__':
sns.set_style("ticks")
# plot_b_vs_V() # veccutoff.pdf
# plot_b_vs_V(vectorial=False, scalar=True) # lpcutoff.pdf
# plot_zoom(FIBERS[1]) # fiberbzoom.pdf
COLORS = [[],
sns.color_palette("Blues_r"),
sns.color_palette("Reds_r"),
sns.color_palette("Greens_r")]
# plot_var(numpy.linspace(1.2, 1.8, 31), 1.6, 1.4,
# (1, 8), FIRSTMODES) # centervar
plot_var(numpy.linspace(1.2, 1.8), 1.6, 1.4,
(0, 25), mmax=3, numax=5, colors=COLORS)
# plot_var(1.6, numpy.linspace(1.2, 1.8, 31), 1.4,
# (1, 8), FIRSTMODES) # ringvar
pyplot.show()
| gpl-3.0 |
rstoneback/pysat | pysat/tests/test_utils.py | 2 | 17475 | """
tests the pysat utils area
"""
import os
import tempfile
import warnings
from nose.tools import assert_raises, raises
import numpy as np
import pandas as pds
import pysat
import sys
if sys.version_info[0] >= 3:
from importlib import reload as re_load
else:
re_load = reload
# ----------------------------------
# test netCDF export file support
def prep_dir(inst=None):
if inst is None:
inst = pysat.Instrument(platform='pysat', name='testing')
# create data directories
try:
os.makedirs(inst.files.data_path)
except OSError:
pass
def remove_files(inst):
# remove any files
temp_dir = inst.files.data_path
for the_file in os.listdir(temp_dir):
        if the_file == 'pysat_test_ncdf.nc':
file_path = os.path.join(temp_dir, the_file)
if os.path.isfile(file_path):
os.unlink(file_path)
def test_deprecation_warning_computational_form():
"""Test if computational form in utils is deprecated"""
data = pds.Series([0, 1, 2])
warnings.simplefilter("always")
dslice1 = pysat.ssnl.computational_form(data)
with warnings.catch_warnings(record=True) as war:
dslice2 = pysat.utils.computational_form(data)
assert (dslice1 == dslice2).all()
assert len(war) >= 1
assert war[0].category == DeprecationWarning
class TestBasics():
def setup(self):
"""Runs before every method to create a clean testing setup."""
# store current pysat directory
self.data_path = pysat.data_dir
def teardown(self):
"""Runs after every method to clean up previous testing."""
pysat.utils.set_data_dir(self.data_path)
#######################
# test pysat data dir options
def test_set_data_dir(self):
"""update data_dir"""
pysat.utils.set_data_dir('.')
check1 = (pysat.data_dir == '.')
# Check if next load of pysat remembers the change
pysat._files = re_load(pysat._files)
pysat._instrument = re_load(pysat._instrument)
re_load(pysat)
check2 = (pysat.data_dir == '.')
assert check1 & check2
def test_set_data_dir_no_store(self):
"""update data_dir without storing"""
pysat.utils.set_data_dir('.', store=False)
check1 = (pysat.data_dir == '.')
# Check if next load of pysat remembers old settings
pysat._files = re_load(pysat._files)
pysat._instrument = re_load(pysat._instrument)
re_load(pysat)
check2 = (pysat.data_dir == self.data_path)
assert check1 & check2
def test_initial_pysat_load(self):
import shutil
saved = False
try:
root = os.path.join(os.getenv('HOME'), '.pysat')
new_root = os.path.join(os.getenv('HOME'), '.saved_pysat')
shutil.move(root, new_root)
saved = True
except:
pass
re_load(pysat)
try:
if saved:
# remove directory, trying to be careful
os.remove(os.path.join(root, 'data_path.txt'))
os.rmdir(root)
shutil.move(new_root, root)
except:
pass
assert True
class TestScaleUnits():
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.deg_units = ["deg", "degree", "degrees", "rad", "radian",
"radians", "h", "hr", "hrs", "hours"]
self.dist_units = ["m", "km", "cm"]
self.vel_units = ["m/s", "cm/s", "km/s", 'm s$^{-1}$', 'cm s$^{-1}$',
'km s$^{-1}$', 'm s-1', 'cm s-1', 'km s-1']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.deg_units, self.dist_units, self.vel_units
def test_scale_units_same(self):
""" Test scale_units when both units are the same """
scale = pysat.utils.scale_units("happy", "happy")
assert scale == 1.0
def test_scale_units_angles(self):
"""Test scale_units for angles """
for out_unit in self.deg_units:
scale = pysat.utils.scale_units(out_unit, "deg")
if out_unit.find("deg") == 0:
assert scale == 1.0
elif out_unit.find("rad") == 0:
assert scale == np.pi / 180.0
else:
assert scale == 1.0 / 15.0
def test_scale_units_dist(self):
"""Test scale_units for distances """
for out_unit in self.dist_units:
scale = pysat.utils.scale_units(out_unit, "m")
if out_unit == "m":
assert scale == 1.0
elif out_unit.find("km") == 0:
assert scale == 0.001
else:
assert scale == 100.0
def test_scale_units_vel(self):
"""Test scale_units for velocities """
for out_unit in self.vel_units:
scale = pysat.utils.scale_units(out_unit, "m/s")
if out_unit.find("m") == 0:
assert scale == 1.0
elif out_unit.find("km") == 0:
assert scale == 0.001
else:
assert scale == 100.0
def test_scale_units_bad_output(self):
"""Test scale_units for unknown output unit"""
assert_raises(ValueError, pysat.utils.scale_units, "happy", "m")
try:
pysat.utils.scale_units('happy', 'm')
except ValueError as verr:
assert str(verr).find('output unit') > 0
def test_scale_units_bad_input(self):
"""Test scale_units for unknown input unit"""
assert_raises(ValueError, pysat.utils.scale_units, "m", "happy")
try:
pysat.utils.scale_units('m', 'happy')
except ValueError as verr:
assert str(verr).find('input unit') > 0
def test_scale_units_bad_match_pairs(self):
"""Test scale_units for mismatched input for all pairings"""
assert_raises(ValueError, pysat.utils.scale_units, "m", "m/s")
assert_raises(ValueError, pysat.utils.scale_units, "m", "deg")
assert_raises(ValueError, pysat.utils.scale_units, "h", "km/s")
def test_scale_units_bad_match_message(self):
"""Test scale_units error message for mismatched input"""
assert_raises(ValueError, pysat.utils.scale_units, "m", "m/s")
try:
pysat.utils.scale_units('m', 'm/s')
except ValueError as verr:
assert str(verr).find('Cannot scale') >= 0
assert str(verr).find('unknown units') < 0
def test_scale_units_both_bad(self):
"""Test scale_units for bad input and output"""
assert_raises(ValueError, pysat.utils.scale_units, "happy", "sad")
try:
pysat.utils.scale_units('happy', 'sad')
except ValueError as verr:
assert str(verr).find('unknown units') > 0
class TestBasicNetCDF4():
def setup(self):
"""Runs before every method to create a clean testing setup."""
# store current pysat directory
self.data_path = pysat.data_dir
# create temporary directory
dir_name = tempfile.mkdtemp()
pysat.utils.set_data_dir(dir_name, store=False)
self.testInst = pysat.Instrument(platform='pysat',
name='testing',
clean_level='clean')
self.testInst.pandas_format = True
# create testing directory
prep_dir(self.testInst)
def teardown(self):
"""Runs after every method to clean up previous testing."""
remove_files(self.testInst)
try:
pysat.utils.set_data_dir(self.data_path, store=False)
except:
pass
del self.testInst
def test_basic_write_and_read_netcdf4_default_format(self):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
self.testInst.to_netcdf4(outfile)
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
self.testInst.data = \
self.testInst.data.reindex(sorted(self.testInst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
for key in self.testInst.data.columns:
print('Testing Data Equality to filesystem and back ', key)
assert(np.all(self.testInst[key] == loaded_inst[key]))
def test_basic_write_and_read_netcdf4_mixed_case_format(self):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
# modify data names in data
original = sorted(self.testInst.data.columns)
self.testInst.data = self.testInst.data.rename(str.upper, axis='columns')
self.testInst.to_netcdf4(outfile, preserve_meta_case=True)
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
self.testInst.data = \
self.testInst.data.reindex(sorted(self.testInst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
# check that names are lower case when written
assert(np.all(original == loaded_inst.columns))
for key in self.testInst.data.columns:
print('Testing Data Equality to filesystem and back ', key)
assert(np.all(self.testInst[key] == loaded_inst[key.lower()]))
# modify metadata names in data
self.testInst.meta.data = self.testInst.meta.data.rename(str.upper, axis='index')
# write file
self.testInst.to_netcdf4(outfile, preserve_meta_case=True)
# load file
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
# check that names are upper case when written
assert(np.all(sorted(self.testInst.data.columns) == sorted(loaded_inst.columns)))
@raises(Exception)
def test_write_netcdf4_duplicate_variable_names(self):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
self.testInst['MLT'] = 1
self.testInst.to_netcdf4(outfile, preserve_meta_case=True)
def test_write_and_read_netcdf4_default_format_w_compression(self):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
self.testInst.to_netcdf4(outfile, zlib=True)
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
self.testInst.data = \
self.testInst.data.reindex(sorted(self.testInst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
for key in self.testInst.data.columns:
print('Testing Data Equality to filesystem and back ', key)
assert (np.all(self.testInst[key] == loaded_inst[key]))
# assert(np.all(self.testInst.data == loaded_inst))
def test_write_and_read_netcdf4_default_format_w_weird_epoch_name(self):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
self.testInst.to_netcdf4(outfile, epoch_name='Santa')
loaded_inst, meta = pysat.utils.load_netcdf4(outfile,
epoch_name='Santa')
self.testInst.data = \
self.testInst.data.reindex(sorted(self.testInst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
for key in self.testInst.data.columns:
print('Testing Data Equality to filesystem and back ', key)
assert (np.all(self.testInst[key] == loaded_inst[key]))
def test_write_and_read_netcdf4_default_format_higher_order(self):
# create a bunch of files by year and doy
test_inst = pysat.Instrument('pysat', 'testing2d')
prep_dir(test_inst)
outfile = os.path.join(test_inst.files.data_path, 'pysat_test_ncdf.nc')
test_inst.load(2009, 1)
test_inst.to_netcdf4(outfile)
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
test_inst.data = test_inst.data.reindex(sorted(test_inst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
prep_dir(test_inst)
# test Series of DataFrames
test_list = []
for frame1, frame2 in zip(test_inst.data['profiles'],
loaded_inst['profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('profiles', inplace=True, axis=1)
test_inst.data.drop('profiles', inplace=True, axis=1)
# second series of frames
for frame1, frame2 in zip(test_inst.data['alt_profiles'],
loaded_inst['alt_profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('alt_profiles', inplace=True, axis=1)
test_inst.data.drop('alt_profiles', inplace=True, axis=1)
# check series of series
for frame1, frame2 in zip(test_inst.data['series_profiles'],
loaded_inst['series_profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('series_profiles', inplace=True, axis=1)
test_inst.data.drop('series_profiles', inplace=True, axis=1)
assert(np.all((test_inst.data == loaded_inst).all()))
assert np.all(test_list)
def test_write_and_read_netcdf4_default_format_higher_order_w_zlib(self):
# create a bunch of files by year and doy
test_inst = pysat.Instrument('pysat', 'testing2d')
prep_dir(test_inst)
outfile = os.path.join(test_inst.files.data_path, 'pysat_test_ncdf.nc')
test_inst.load(2009, 1)
test_inst.to_netcdf4(outfile, zlib=True)
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
test_inst.data = test_inst.data.reindex(sorted(test_inst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
prep_dir(test_inst)
# test Series of DataFrames
test_list = []
for frame1, frame2 in zip(test_inst.data['profiles'],
loaded_inst['profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('profiles', inplace=True, axis=1)
test_inst.data.drop('profiles', inplace=True, axis=1)
# second series of frames
for frame1, frame2 in zip(test_inst.data['alt_profiles'],
loaded_inst['alt_profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('alt_profiles', inplace=True, axis=1)
test_inst.data.drop('alt_profiles', inplace=True, axis=1)
# check series of series
for frame1, frame2 in zip(test_inst.data['series_profiles'],
loaded_inst['series_profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('series_profiles', inplace=True, axis=1)
test_inst.data.drop('series_profiles', inplace=True, axis=1)
assert (np.all((test_inst.data == loaded_inst).all()))
assert np.all(test_list)
def test_netcdf_attribute_override(self):
"""Test that attributes in netcdf file may be overridden"""
self.testInst.load(2009, 1)
try:
assert self.testInst.bespoke # should raise
except AttributeError:
pass
fname = 'output.nc'
self.testInst.meta.bespoke = True
self.testInst.meta.transfer_attributes_to_instrument(self.testInst)
# ensure custom meta attribute assigned to instrument
assert self.testInst.bespoke
outfile = os.path.join(self.testInst.files.data_path, fname)
self.testInst.to_netcdf4(outfile)
data, meta = pysat.utils.load_netcdf4(outfile)
# custom attribute correctly read from file
assert meta.bespoke
# assign metadata to new instrument
inst = pysat.Instrument()
inst.data = data
inst.meta = meta
meta.transfer_attributes_to_instrument(inst)
fname2 = 'output2.nc'
outfile2 = os.path.join(self.testInst.files.data_path, fname2)
inst.bespoke = False
inst.myattr = True
inst.to_netcdf4(outfile2)
data2, meta2 = pysat.utils.load_netcdf4(outfile2)
assert meta2.myattr
assert not meta2.bespoke
| bsd-3-clause |
treverhines/RBF | docs/scripts/interpolate.c.py | 1 | 2385 | '''
This script compares the RBFInterpolant and KNearestRBFInterpolant classes on
Franke's test function.
'''
import numpy as np
import matplotlib.pyplot as plt
from rbf.interpolate import RBFInterpolant, KNearestRBFInterpolant
np.random.seed(1)
def frankes_test_function(x):
x1, x2 = x[:, 0], x[:, 1]
term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
y = term1 + term2 + term3 + term4
return y
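# For reference (transcribed from the implementation above), Franke's test
# function is
#   f(x1, x2) = 0.75 exp(-((9 x1 - 2)^2 + (9 x2 - 2)^2) / 4)
#             + 0.75 exp(-(9 x1 + 1)^2 / 49 - (9 x2 + 1) / 10)
#             + 0.50 exp(-((9 x1 - 7)^2 + (9 x2 - 3)^2) / 4)
#             - 0.20 exp(-(9 x1 - 4)^2 - (9 x2 - 7)^2)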
# the observations and their locations for interpolation
xobs = np.random.uniform(0.0, 1.0, (500, 2))
yobs = frankes_test_function(xobs)
# the locations where we evaluate the interpolants
xitp = np.mgrid[0:1:200j, 0:1:200j].reshape(2, -1).T
# the true function which we want the interpolants to reproduce
true_soln = frankes_test_function(xitp)
yitp = RBFInterpolant(xobs, yobs, phi='phs3', order=1)(xitp)
fig, ax = plt.subplots(1, 2, figsize=(9, 3.5))
ax[0].set_title('RBFInterpolant')
p = ax[0].tripcolor(xitp[:, 0], xitp[:, 1], yitp)
ax[0].scatter(xobs[:, 0], xobs[:, 1], c='k', s=3)
ax[0].set_xlim(0, 1)
ax[0].set_ylim(0, 1)
ax[0].set_aspect('equal')
ax[0].grid(ls=':', color='k')
fig.colorbar(p, ax=ax[0])
ax[1].set_title('|error|')
p = ax[1].tripcolor(xitp[:, 0], xitp[:, 1], np.abs(yitp - true_soln))
ax[1].set_xlim(0, 1)
ax[1].set_ylim(0, 1)
ax[1].set_aspect('equal')
ax[1].grid(ls=':', color='k')
fig.colorbar(p, ax=ax[1])
fig.tight_layout()
plt.savefig('../figures/interpolate.c.all.png')
for k in [5, 20, 50]:
yitp = KNearestRBFInterpolant(xobs, yobs, k=k, phi='phs3', order=1)(xitp)
fig, ax = plt.subplots(1, 2, figsize=(9, 3.5))
ax[0].set_title('KNearestRBFInterpolant with k=%d' % k)
p = ax[0].tripcolor(xitp[:, 0], xitp[:, 1], yitp)
ax[0].scatter(xobs[:, 0], xobs[:, 1], c='k', s=3)
ax[0].set_xlim(0, 1)
ax[0].set_ylim(0, 1)
ax[0].set_aspect('equal')
ax[0].grid(ls=':', color='k')
fig.colorbar(p, ax=ax[0])
ax[1].set_title('|error|')
p = ax[1].tripcolor(xitp[:, 0], xitp[:, 1], np.abs(yitp - true_soln))
ax[1].set_xlim(0, 1)
ax[1].set_ylim(0, 1)
ax[1].set_aspect('equal')
ax[1].grid(ls=':', color='k')
fig.colorbar(p, ax=ax[1])
fig.tight_layout()
plt.savefig('../figures/interpolate.c.%d.png' % k)
plt.show()
| mit |
ShawnMurd/MetPy | tests/plots/test_cartopy_utils.py | 1 | 2792 | # Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the cartopy utilities."""
import cartopy.crs as ccrs
import matplotlib
import matplotlib.pyplot as plt
import pytest
from metpy.plots import USCOUNTIES, USSTATES
# Fixtures to make sure we have the right backend and consistent round
from metpy.testing import set_agg_backend # noqa: F401, I202
MPL_VERSION = matplotlib.__version__[:3]
@pytest.mark.mpl_image_compare(tolerance={'2.1': 0.161}.get(MPL_VERSION, 0.053),
remove_text=True)
def test_us_county_defaults():
"""Test the default US county plotting."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
ax.add_feature(USCOUNTIES)
return fig
@pytest.mark.mpl_image_compare(tolerance={'2.1': 0.1994}.get(MPL_VERSION, 0.092),
remove_text=True)
def test_us_county_scales():
"""Test US county plotting with all scales."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
axis.add_feature(USCOUNTIES.with_scale(scale))
return fig
@pytest.mark.mpl_image_compare(tolerance=0.053, remove_text=True)
def test_us_states_defaults():
"""Test the default US States plotting."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.set_extent([270, 280, 28, 39], ccrs.Geodetic())
ax.add_feature(USSTATES)
return fig
@pytest.mark.mpl_image_compare(tolerance={'2.1': 0.991}.get(MPL_VERSION, 0.092),
remove_text=True)
def test_us_states_scales():
"""Test the default US States plotting with all scales."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270, 280, 28, 39], ccrs.Geodetic())
axis.add_feature(USSTATES.with_scale(scale))
return fig
| bsd-3-clause |
joernhees/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
SaganBolliger/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4.py | 69 | 20664 | from __future__ import division
import math
import os
import sys
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, IdleEvent, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool
try:
from PyQt4 import QtCore, QtGui, Qt
except ImportError:
raise ImportError("Qt4 backend requires that PyQt4 is installed.")
backend_version = "0.9.1"
def fn_name(): return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE : QtCore.Qt.SizeAllCursor,
cursors.HAND : QtCore.Qt.PointingHandCursor,
cursors.POINTER : QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION : QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
if QtGui.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
qApp = QtGui.QApplication( [" "] )
QtCore.QObject.connect( qApp, QtCore.SIGNAL( "lastWindowClosed()" ),
qApp, QtCore.SLOT( "quit()" ) )
#remember that matplotlib created the qApp - will be used by show()
_create_qApp.qAppCreatedHere = True
_create_qApp.qAppCreatedHere = False
def show():
"""
Show all the figures and enter the qt main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if DEBUG: print 'Inside show'
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
if _create_qApp.qAppCreatedHere:
QtGui.qApp.exec_()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
thisFig = Figure( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
class FigureCanvasQT( QtGui.QWidget, FigureCanvasBase ):
keyvald = { QtCore.Qt.Key_Control : 'control',
QtCore.Qt.Key_Shift : 'shift',
QtCore.Qt.Key_Alt : 'alt',
}
    # map Qt mouse button codes to matplotlib buttons: left 1, middle 2, right 3
buttond = {1:1, 2:3, 4:2}
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
QtGui.QWidget.__init__( self )
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
# hide until we can test and fix
#self.startTimer(backend_IdleEvent.milliseconds)
w,h = self.get_width_height()
self.resize( w, h )
def __timerEvent(self, event):
# hide until we can test and fix
self.mpl_idle_event(event)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond[event.button()]
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print 'button pressed:', event.button()
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
#if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond[event.button()]
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print 'button released'
def keyPressEvent( self, event ):
key = self._get_key( event )
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
QtGui.QWidget.resizeEvent( self, event )
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQtAgg.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch )
self.draw()
def resize( self, w, h ):
# Pass through to Qt to resize the widget.
QtGui.QWidget.resize( self, w, h )
# Resize the figure by converting pixels to inches.
pixelPerInch = self.figure.dpi
wInch = w / pixelPerInch
hInch = h / pixelPerInch
self.figure.set_size_inches( wInch, hInch )
# Redraw everything.
self.draw()
def sizeHint( self ):
w, h = self.get_width_height()
return QtCore.QSize( w, h )
    def minimumSizeHint( self ):
return QtCore.QSize( 10, 10 )
def _get_key( self, event ):
if event.key() < 256:
key = str(event.text())
elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def flush_events(self):
Qt.qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
if DEBUG: print 'FigureManagerQT.%s' % fn_name()
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = QtGui.QMainWindow()
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
self.window.setWindowIcon(QtGui.QIcon( image ))
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
QtCore.QObject.connect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.window)
self.window.addToolBar(self.toolbar)
QtCore.QObject.connect(self.toolbar, QtCore.SIGNAL("message"),
self.window.statusBar().showMessage)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
Gcf.destroy(self.num)
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height)
def destroy( self, *args ):
if self.window._destroying: return
self.window._destroying = True
QtCore.QObject.disconnect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close()
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT( NavigationToolbar2, QtGui.QToolBar ):
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.coordinates = coordinates
QtGui.QToolBar.__init__( self, parent )
NavigationToolbar2.__init__( self, canvas )
def _icon(self, name):
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
a = self.addAction(self._icon('home.svg'), 'Home', self.home)
a.setToolTip('Reset original view')
a = self.addAction(self._icon('back.svg'), 'Back', self.back)
a.setToolTip('Back to previous view')
a = self.addAction(self._icon('forward.svg'), 'Forward', self.forward)
a.setToolTip('Forward to next view')
self.addSeparator()
a = self.addAction(self._icon('move.svg'), 'Pan', self.pan)
a.setToolTip('Pan axes with left mouse, zoom with right')
a = self.addAction(self._icon('zoom_to_rect.svg'), 'Zoom', self.zoom)
a.setToolTip('Zoom to rectangle')
self.addSeparator()
a = self.addAction(self._icon('subplots.png'), 'Subplots',
self.configure_subplots)
a.setToolTip('Configure subplots')
a = self.addAction(self._icon('filesave.svg'), 'Save',
self.save_figure)
a.setToolTip('Save the figure')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtGui.QLabel( "", self )
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop )
self.locLabel.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.emit(QtCore.SIGNAL("message"), s)
if self.coordinates:
self.locLabel.setText(s.replace(', ', '\n'))
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
QtGui.QApplication.restoreOverrideCursor()
QtGui.QApplication.setOverrideCursor( QtGui.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = QtGui.QMainWindow()
win = self.adj_window
win.setAttribute(QtCore.Qt.WA_DeleteOnClose)
win.setWindowTitle("Subplot Configuration Tool")
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
win.setWindowIcon(QtGui.QIcon( image ))
tool = SubplotToolQt(self.canvas.figure, win)
win.setCentralWidget(tool)
win.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure( self ):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = QtGui.QFileDialog.getSaveFileName(
self, "Choose a filename to save to", start, filters, selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
QtGui.QMessageBox.critical(
self, "Error saving file", str(e),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)
class SubplotToolQt( SubplotTool, QtGui.QWidget ):
def __init__(self, targetfig, parent):
QtGui.QWidget.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.sliderleft = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderbottom = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderright = QtGui.QSlider(QtCore.Qt.Horizontal)
self.slidertop = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderwspace = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderhspace = QtGui.QSlider(QtCore.Qt.Vertical)
# constraints
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderright.setMinimum )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderleft.setMaximum )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.slidertop.setMinimum )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderbottom.setMaximum )
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
adjustments = ('left:', 'bottom:', 'right:', 'top:', 'wspace:', 'hspace:')
for slider, adjustment in zip(sliders, adjustments):
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
layout = QtGui.QGridLayout()
leftlabel = QtGui.QLabel('left')
layout.addWidget(leftlabel, 2, 0)
layout.addWidget(self.sliderleft, 2, 1)
toplabel = QtGui.QLabel('top')
layout.addWidget(toplabel, 0, 2)
layout.addWidget(self.slidertop, 1, 2)
layout.setAlignment(self.slidertop, QtCore.Qt.AlignHCenter)
bottomlabel = QtGui.QLabel('bottom')
        layout.addWidget(bottomlabel, 4, 2)
layout.addWidget(self.sliderbottom, 3, 2)
layout.setAlignment(self.sliderbottom, QtCore.Qt.AlignHCenter)
rightlabel = QtGui.QLabel('right')
layout.addWidget(rightlabel, 2, 4)
layout.addWidget(self.sliderright, 2, 3)
hspacelabel = QtGui.QLabel('hspace')
layout.addWidget(hspacelabel, 0, 6)
layout.setAlignment(hspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderhspace, 1, 6)
layout.setAlignment(self.sliderhspace, QtCore.Qt.AlignHCenter)
wspacelabel = QtGui.QLabel('wspace')
layout.addWidget(wspacelabel, 4, 6)
layout.setAlignment(wspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderwspace, 3, 6)
layout.setAlignment(self.sliderwspace, QtCore.Qt.AlignBottom)
layout.setRowStretch(1,1)
layout.setRowStretch(3,1)
layout.setColumnStretch(1,1)
layout.setColumnStretch(3,1)
layout.setColumnStretch(6,1)
self.setLayout(layout)
self.sliderleft.setSliderPosition(int(targetfig.subplotpars.left*1000))
self.sliderbottom.setSliderPosition(\
int(targetfig.subplotpars.bottom*1000))
self.sliderright.setSliderPosition(\
int(targetfig.subplotpars.right*1000))
self.slidertop.setSliderPosition(int(targetfig.subplotpars.top*1000))
self.sliderwspace.setSliderPosition(\
int(targetfig.subplotpars.wspace*1000))
self.sliderhspace.setSliderPosition(\
int(targetfig.subplotpars.hspace*1000))
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcleft )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcbottom )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcright )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.functop )
QtCore.QObject.connect( self.sliderwspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcwspace )
QtCore.QObject.connect( self.sliderhspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funchspace )
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
self.targetfig.subplots_adjust(left=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
self.targetfig.subplots_adjust(right=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
self.targetfig.subplots_adjust(bottom=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
self.targetfig.subplots_adjust(top=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
QtGui.QMessageBox.warning( None, "Matplotlib", msg, QtGui.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
| agpl-3.0 |
russel1237/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]`` and
        ``grid.shape[0] == prod(len(axis) for axis in axes)``, which is at most
        ``grid_resolution ** X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
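# Illustrative sketch (not part of the original module): when every column has
# fewer unique values than ``grid_resolution``, each axis is simply the unique
# values of that column and the grid is their cartesian product, e.g.
#   X = np.array([[0., 10.], [1., 20.]])
#   grid, axes = _grid_from_X(X)
#   # axes -> [array([0., 1.]), array([10., 20.])]
#   # grid -> array([[ 0., 10.], [ 0., 20.], [ 1., 10.], [ 1., 20.]])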
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependence should be
        computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependence should be evaluated (either ``grid`` or ``X``
        must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the ``grid``. Only used if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
danmelamed/vowpal_wabbit | utl/vw-hyperopt.py | 8 | 15623 | #!/usr/bin/env python
# coding: utf-8
"""
Github version of hyperparameter optimization for Vowpal Wabbit via hyperopt
"""
__author__ = 'kurtosis'
from hyperopt import hp, fmin, tpe, rand, Trials, STATUS_OK
from sklearn.metrics import roc_curve, auc, log_loss, precision_recall_curve
import numpy as np
from datetime import datetime as dt
import subprocess, shlex
from math import exp, log
import argparse
import re
import logging
import json
import matplotlib
from matplotlib import pyplot as plt
try:
import seaborn as sns
except ImportError:
print ("Warning: seaborn is not installed. "
"Without seaborn, standard matplotlib plots will not look very charming. "
"It's recommended to install it via pip install seaborn")
def read_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--searcher', type=str, default='tpe', choices=['tpe', 'rand'])
parser.add_argument('--max_evals', type=int, default=100)
parser.add_argument('--train', type=str, required=True, help="training set")
parser.add_argument('--holdout', type=str, required=True, help="holdout set")
parser.add_argument('--vw_space', type=str, required=True, help="hyperparameter search space (must be 'quoted')")
parser.add_argument('--outer_loss_function', default='logistic',
choices=['logistic', 'roc-auc']) # TODO: implement squared, hinge, quantile, PR-auc
parser.add_argument('--regression', action='store_true', default=False, help="""regression (continuous class labels)
or classification (-1 or 1, default value).""")
    parser.add_argument('--plot', action='store_true', default=False, help=("Plot the results at the end. "
"Requires matplotlib and "
"(optionally) seaborn to be installed."))
args = parser.parse_args()
return args
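# Example invocation (illustrative; uses only the flags defined above):
#   ./vw-hyperopt.py --train train.vw --holdout holdout.vw --max_evals 50 \
#       --outer_loss_function logistic \
#       --vw_space '--algorithms=ftrl,sgd -l=0.01..10~L --passes=1..10~I'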
class HyperoptSpaceConstructor(object):
"""
    Takes command-line input and transforms it into a hyperopt search space.
An example of command-line input:
--algorithms=ftrl,sgd --l2=1e-8..1e-4~LO -l=0.01..10~L --ftrl_beta=0.01..1 --passes=1..10~I -q=SE+SZ+DR,SE~O
"""
def __init__(self, command):
self.command = command
self.space = None
self.algorithm_metadata = {
'ftrl': {'arg': '--ftrl', 'prohibited_flags': set()},
'sgd': {'arg': '', 'prohibited_flags': {'--ftrl_alpha', '--ftrl_beta'}}
}
self.range_pattern = re.compile("[^~]+") # re.compile("(?<=\[).+(?=\])")
self.distr_pattern = re.compile("(?<=~)[IOL]*") # re.compile("(?<=\])[IOL]*")
self.only_continuous = re.compile("(?<=~)[IL]*") # re.compile("(?<=\])[IL]*")
def _process_vw_argument(self, arg, value, algorithm):
try:
distr_part = self.distr_pattern.findall(value)[0]
except IndexError:
distr_part = ''
range_part = self.range_pattern.findall(value)[0]
is_continuous = '..' in range_part
ocd = self.only_continuous.findall(value)
        if not is_continuous and len(ocd) > 0 and ocd[0] != '':
raise ValueError(("Need a range instead of a list of discrete values to define "
"uniform or log-uniform distribution. "
"Please, use [min..max]%s form") % (distr_part))
if is_continuous and arg == '-q':
raise ValueError(("You must directly specify namespaces for quadratic features "
"as a list of values, not as a parametric distribution"))
hp_choice_name = "_".join([algorithm, arg.replace('-', '')])
try_omit_zero = 'O' in distr_part
distr_part = distr_part.replace('O', '')
if is_continuous:
vmin, vmax = [float(i) for i in range_part.split('..')]
if distr_part == 'L':
distrib = hp.loguniform(hp_choice_name, log(vmin), log(vmax))
elif distr_part == '':
distrib = hp.uniform(hp_choice_name, vmin, vmax)
elif distr_part == 'I':
distrib = hp.quniform(hp_choice_name, vmin, vmax, 1)
elif distr_part in {'LI', 'IL'}:
distrib = hp.qloguniform(hp_choice_name, log(vmin), log(vmax), 1)
else:
raise ValueError("Cannot recognize distribution: %s" % (distr_part))
else:
possible_values = range_part.split(',')
if arg == '-q':
possible_values = [v.replace('+', ' -q ') for v in possible_values]
distrib = hp.choice(hp_choice_name, possible_values)
if try_omit_zero:
hp_choice_name_outer = hp_choice_name + '_outer'
distrib = hp.choice(hp_choice_name_outer, ['omit', distrib])
return distrib
def string_to_pyll(self):
line = shlex.split(self.command)
algorithms = ['sgd']
for arg in line:
arg, value = arg.split('=')
if arg == '--algorithms':
algorithms = set(self.range_pattern.findall(value)[0].split(','))
if tuple(self.distr_pattern.findall(value)) not in {(), ('O',)}:
raise ValueError(("Distribution options are prohibited for --algorithms flag. "
"Simply list the algorithms instead (like --algorithms=ftrl,sgd)"))
elif self.distr_pattern.findall(value) == ['O']:
algorithms.add('sgd')
for algo in algorithms:
if algo not in self.algorithm_metadata:
raise NotImplementedError(("%s algorithm is not found. "
"Supported algorithms by now are %s")
% (algo, str(self.algorithm_metadata.keys())))
break
self.space = {algo: {'type': algo, 'argument': self.algorithm_metadata[algo]['arg']} for algo in algorithms}
for algo in algorithms:
for arg in line:
arg, value = arg.split('=')
if arg == '--algorithms':
continue
if arg not in self.algorithm_metadata[algo]['prohibited_flags']:
distrib = self._process_vw_argument(arg, value, algo)
self.space[algo][arg] = distrib
else:
pass
self.space = hp.choice('algorithm', self.space.values())
class HyperOptimizer(object):
def __init__(self, train_set, holdout_set, command, max_evals=100,
outer_loss_function='logistic',
searcher='tpe', is_regression=False):
self.train_set = train_set
self.holdout_set = holdout_set
self.train_model = './current.model'
self.holdout_pred = './holdout.pred'
self.trials_output = './trials.json'
self.hyperopt_progress_plot = './hyperopt_progress.png'
self.log = './log.log'
self.logger = self._configure_logger()
# hyperopt parameter sample, converted into a string with flags
self.param_suffix = None
self.train_command = None
self.validate_command = None
self.y_true_train = []
self.y_true_holdout = []
self.outer_loss_function = outer_loss_function
self.space = self._get_space(command)
self.max_evals = max_evals
self.searcher = searcher
self.is_regression = is_regression
self.trials = Trials()
self.current_trial = 0
def _get_space(self, command):
hs = HyperoptSpaceConstructor(command)
hs.string_to_pyll()
return hs.space
def _configure_logger(self):
LOGGER_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)-8s [%(name)s/%(module)s:%(lineno)d]: %(message)s"
LOGGER_DATEFMT = "%Y-%m-%d %H:%M:%S"
LOGFILE = self.log
logging.basicConfig(format=LOGGER_FORMAT,
datefmt=LOGGER_DATEFMT,
level=logging.DEBUG)
formatter = logging.Formatter(LOGGER_FORMAT, datefmt=LOGGER_DATEFMT)
file_handler = logging.FileHandler(LOGFILE)
file_handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(file_handler)
return logger
def get_hyperparam_string(self, **kwargs):
for arg in ['--passes']: #, '--rank', '--lrq']:
if arg in kwargs:
kwargs[arg] = int(kwargs[arg])
#print 'KWARGS: ', kwargs
flags = [key for key in kwargs if key.startswith('-')]
for flag in flags:
if kwargs[flag] == 'omit':
del kwargs[flag]
self.param_suffix = ' '.join(['%s %s' % (key, kwargs[key]) for key in kwargs if key.startswith('-')])
self.param_suffix += ' %s' % (kwargs['argument'])
def compose_vw_train_command(self):
data_part = ('vw -d %s -f %s --holdout_off -c '
% (self.train_set, self.train_model))
self.train_command = ' '.join([data_part, self.param_suffix])
def compose_vw_validate_command(self):
data_part = 'vw -t -d %s -i %s -p %s --holdout_off -c' \
% (self.holdout_set, self.train_model, self.holdout_pred)
self.validate_command = data_part
def fit_vw(self):
self.compose_vw_train_command()
self.logger.info("executing the following command (training): %s" % self.train_command)
subprocess.call(shlex.split(self.train_command))
def validate_vw(self):
self.compose_vw_validate_command()
self.logger.info("executing the following command (validation): %s" % self.validate_command)
subprocess.call(shlex.split(self.validate_command))
def get_y_true_train(self):
self.logger.info("loading true train class labels...")
yh = open(self.train_set, 'r')
self.y_true_train = []
for line in yh:
self.y_true_train.append(int(line.strip()[0:2]))
if not self.is_regression:
self.y_true_train = [(i + 1.) / 2 for i in self.y_true_train]
self.logger.info("train length: %d" % len(self.y_true_train))
def get_y_true_holdout(self):
self.logger.info("loading true holdout class labels...")
yh = open(self.holdout_set, 'r')
self.y_true_holdout = []
for line in yh:
self.y_true_holdout.append(int(line.strip()[0:2]))
if not self.is_regression:
self.y_true_holdout = [(i + 1.) / 2 for i in self.y_true_holdout]
self.logger.info("holdout length: %d" % len(self.y_true_holdout))
def validation_metric_vw(self):
v = open('%s' % self.holdout_pred, 'r')
y_pred_holdout = []
for line in v:
y_pred_holdout.append(float(line.split()[0].strip()))
if self.outer_loss_function == 'logistic':
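            # the holdout predictions are raw scores, so squash them through a
            # sigmoid to get probabilities before computing log-loss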
y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
loss = log_loss(self.y_true_holdout, y_pred_holdout_proba)
elif self.outer_loss_function == 'squared': # TODO: write it
pass
elif self.outer_loss_function == 'hinge': # TODO: write it
pass
elif self.outer_loss_function == 'roc-auc':
y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
fpr, tpr, _ = roc_curve(self.y_true_holdout, y_pred_holdout_proba)
loss = -auc(fpr, tpr)
self.logger.info('parameter suffix: %s' % self.param_suffix)
self.logger.info('loss value: %.6f' % loss)
return loss
def hyperopt_search(self, parallel=False): # TODO: implement parallel search with MongoTrials
def objective(kwargs):
start = dt.now()
self.current_trial += 1
self.logger.info('\n\nStarting trial no.%d' % self.current_trial)
self.get_hyperparam_string(**kwargs)
self.fit_vw()
self.validate_vw()
loss = self.validation_metric_vw()
finish = dt.now()
elapsed = finish - start
self.logger.info("evaluation time for this step: %s" % str(elapsed))
# clean up
subprocess.call(shlex.split('rm %s %s' % (self.train_model, self.holdout_pred)))
to_return = {'status': STATUS_OK,
'loss': loss, # TODO: include also train loss tracking in order to prevent overfitting
'eval_time': elapsed.seconds,
'train_command': self.train_command,
'current_trial': self.current_trial
}
return to_return
self.trials = Trials()
if self.searcher == 'tpe':
algo = tpe.suggest
elif self.searcher == 'rand':
algo = rand.suggest
logging.debug("starting hypersearch...")
best_params = fmin(objective, space=self.space, trials=self.trials, algo=algo, max_evals=self.max_evals)
self.logger.debug("the best hyperopt parameters: %s" % str(best_params))
json.dump(self.trials.results, open(self.trials_output, 'w'))
self.logger.info('All the trials results are saved at %s' % self.trials_output)
best_configuration = self.trials.results[np.argmin(self.trials.losses())]['train_command']
best_loss = self.trials.results[np.argmin(self.trials.losses())]['loss']
self.logger.info("\n\nA full training command with the best hyperparameters: \n%s\n\n" % best_configuration)
self.logger.info("\n\nThe best holdout loss value: \n%s\n\n" % best_loss)
return best_configuration, best_loss
def plot_progress(self):
try:
sns.set_palette('Set2')
sns.set_style("darkgrid", {"axes.facecolor": ".95"})
except:
pass
self.logger.debug('plotting...')
plt.figure(figsize=(15,10))
plt.subplot(211)
plt.plot(self.trials.losses(), '.', markersize=12)
plt.title('Per-Iteration Outer Loss', fontsize=16)
plt.ylabel('Outer loss function value')
        if self.outer_loss_function in ['logistic']:
plt.yscale('log')
xticks = [int(i) for i in np.linspace(plt.xlim()[0], plt.xlim()[1], min(len(self.trials.losses()), 11))]
plt.xticks(xticks, xticks)
plt.subplot(212)
plt.plot(np.minimum.accumulate(self.trials.losses()), '.', markersize=12)
plt.title('Cumulative Minimum Outer Loss', fontsize=16)
plt.xlabel('Iteration number')
plt.ylabel('Outer loss function value')
xticks = [int(i) for i in np.linspace(plt.xlim()[0], plt.xlim()[1], min(len(self.trials.losses()), 11))]
plt.xticks(xticks, xticks)
plt.tight_layout()
plt.savefig(self.hyperopt_progress_plot)
self.logger.info('The diagnostic hyperopt progress plot is saved: %s' % self.hyperopt_progress_plot)
def main():
args = read_arguments()
h = HyperOptimizer(train_set=args.train, holdout_set=args.holdout, command=args.vw_space,
max_evals=args.max_evals,
outer_loss_function=args.outer_loss_function,
searcher=args.searcher, is_regression=args.regression)
h.get_y_true_holdout()
h.hyperopt_search()
if args.plot:
h.plot_progress()
if __name__ == '__main__':
main() | bsd-3-clause |
wathen/PhD | MHD/FEniCS/VectorLaplacian/BDMLaplacian.py | 1 | 5571 | # from MatrixOperations import *
from dolfin import *
import ipdb
import scipy.linalg
import numpy as np
import matplotlib.pyplot as plt
# from MatrixOperations import *
# MO = MatrixOperations()
m =7
errL2 = np.zeros((m-1,1))
errDIV= np.zeros((m-1,1))
errH1 = np.zeros((m-1,1))
errDG = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'yes'
Saving = 'no'
if Saving == 'yes':
parameters['linear_algebra_backend'] = 'Epetra'
else:
parameters['linear_algebra_backend'] = 'PETSc'
for xx in xrange(1,m):
print xx
nn = 2**xx
NN[xx-1] = nn
# Create mesh and define function space
nn = int(nn)
if dim == 3:
mesh = UnitCubeMesh(nn,nn,nn)
else:
mesh = UnitSquareMesh(nn,nn)
V =FunctionSpace(mesh, "BDM", 1 )
# creating trial and test function s
v = TestFunction(V)
u = TrialFunction(V)
def boundary(x, on_boundary):
return on_boundary
# Creating expressions along the boundary
if dim == 3:
u0 = Expression(("0","0","0"))
else:
u0 = Expression(('(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])+x[0]*x[1]','(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])+x[0]*x[1]'))
N = FacetNormal(mesh)
# defining boundary conditions
bcs = DirichletBC(V,u0, boundary)
# Creating RHS function
if dim == 3:
f = Expression(('- 2*(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])-2*(x[0]*x[0]-x[0])*(x[2]*x[2]-x[2])-2*(x[1]*x[1]-x[1])*(x[2]*x[2]-x[2])', \
'- 2*(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])-2*(x[0]*x[0]-x[0])*(x[2]*x[2]-x[2])-2*(x[1]*x[1]-x[1])*(x[2]*x[2]-x[2])', \
'- 2*(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])-2*(x[0]*x[0]-x[0])*(x[2]*x[2]-x[2])-2*(x[1]*x[1]-x[1])*(x[2]*x[2]-x[2])'))
else:
f = Expression(('- 2*(x[1]*x[1]-x[1])-2*(x[0]*x[0]-x[0])','-2*(x[0]*x[0]-x[0]) - 2*(x[1]*x[1]-x[1])'))
    # f = Expression(("0","0"))
# defining normal component
h = CellSize(mesh)
h_avg =avg(h)
alpha = 10.0
gamma =10.0
t = as_vector((-N[0], N[1]))
inside = avg(outer(N,grad(inner(v,t))))
# tic()
# a = inner(grad(v), grad(u))*dx \
# - inner(avg(outer(N,grad(inner(v,t)))), outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
# - inner(outer(v('+'),N('+'))+outer(v('-'),N('-')), avg(outer(N,grad(inner(u,t)))))*dS \
# + alpha/h_avg*inner(outer(v('+'),N('+'))+outer(v('-'),N('-')),outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
# - inner(v, grad(inner(u,N)))*ds \
# - inner(grad(v,in), u)*ds \
# + gamma/h*inner(v,u)*ds
a = inner(grad(v), grad(u))*dx \
- inner(avg(grad(v)), outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
- inner(outer(v('+'),N('+'))+outer(v('-'),N('-')), avg(grad(u)))*dS \
+ alpha/h_avg*inner(outer(v('+'),N('+'))+outer(v('-'),N('-')),outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
- inner(outer(v,N), grad(u))*ds \
- inner(grad(v), outer(u,N))*ds \
+ gamma/h*inner(v,u)*ds
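    # the form above is a symmetric interior penalty (SIPG) discretisation:
    # volume gradient term, consistency/symmetry terms on interior facets (dS)
    # with penalty alpha/h_avg, plus Nitsche-type weak Dirichlet terms on the
    # boundary (ds) with penalty gamma/h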
# - inner(outer(v,N), grad(u))*ds \
# - inner(grad(v), outer(u,N))*ds \
L = inner(v,f)*dx+ gamma/h*inner(u0,v)*ds - inner(grad(v),outer(u0,N))*ds
# - inner(grad(v), outer(u,N))*ds \
# b =- dot(outer(v,n), grad(u))*ds \
# - dot(grad(v), outer(u,n))*ds
# assemebling system
AA,bb = assemble_system(a,L,bcs)
DoF[xx-1] = bb.array().size
u = Function(V)
if Solving == 'yes':
# tic()
# set_log_level(PROGRESS)
# solver = KrylovSolver("cg","amg")
# solver.parameters["relative_tolerance"] = 1e-10
# solver.parameters["absolute_tolerance"] = 1e-10
# solver.solve(AA,u.vector(),bb)
# set_log_level(PROGRESS)
# print 'time to solve linear system', toc(),'\n\n'
print 'DoF', DoF[xx-1]
solve(a==L,u)
if dim == 3:
ue = Expression(('x[0]*x[1]*x[2]*(x[1]-1)*(x[2]-1)*(x[0]-1)','x[0]*x[1]*x[2]*(x[1]-1)*(x[2]-1)*(x[0]-1)','x[0]*x[1]*x[2]*(x[1]-1)*(x[2]-1)*(x[0]-1)'))
else:
#ue = Expression(('(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])','(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])'))
ue = Expression(('(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])+x[0]*x[1]','(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])+x[0]*x[1]'))
# ue = Expression(('x[0]*x[1]','x[0]*x[1]'))
e = ue- Function(V,u)
uu= Function(V,u)
errL2[xx-1]=errornorm(ue,Function(V,u),norm_type="L2", degree_rise=4,mesh=mesh)
errDIV[xx-1]=errornorm(ue,Function(V,u),norm_type="Hdiv", degree_rise=4,mesh=mesh)
errH1[xx-1]=errornorm(ue,Function(V,u),norm_type="H1", degree_rise=4,mesh=mesh)
errDG[xx-1] = errL2[xx-1] +errH1[xx-1]
print errL2[xx-1],errDIV[xx-1],errH1[xx-1],errDG[xx-1]
####if Saving == 'yes':
#MO.SaveEpertaMatrix(AA.down_cast().mat(),"A2d")
# plot(u)
# plot(interpolate(ue,V))
# interactive()
plt.loglog(NN,errL2)
plt.title('Error plot for BDM2 elements - L2 convergence = %f' % np.log2(np.average((errL2[0:m-2]/errL2[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
plt.figure()
plt.loglog(NN,errDIV)
plt.title('Error plot for BDM2 elements - Hdiv convergence = %f' % np.log2(np.average((errDIV[0:m-2]/errDIV[1:m-1]))))
plt.xlabel('N')
plt.ylabel('Hdiv error')
plt.figure()
plt.loglog(NN,errH1)
plt.title('Error plot for BDM2 elements - H1 convergence = %f' % np.log2(np.average((errH1[0:m-2]/errH1[1:m-1]))))
plt.xlabel('N')
plt.ylabel('H1 error')
plt.figure()
plt.loglog(NN,errDG)
plt.title('Error plot for BDM2 elements - DG convergence = %f' % np.log2(np.average((errDG[0:m-2]/errDG[1:m-1]))))
plt.xlabel('N')
plt.ylabel('H1 error')
plt.show()
| mit |
PmagPy/PmagPy | programs/foldtest_magic2.py | 2 | 7984 | #!/usr/bin/env python
import sys
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pylab
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
from pmag_env import set_env
def main():
"""
NAME
foldtest_magic.py
DESCRIPTION
does a fold test (Tauxe, 2010) on data
INPUT FORMAT
pmag_specimens format file, er_samples.txt format file (for bedding)
SYNTAX
foldtest_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f pmag_sites formatted file [default is pmag_sites.txt]
-fsa er_samples formatted file [default is er_samples.txt]
-fsi er_sites formatted file
-exc use pmag_criteria.txt to set acceptance criteria
-n NB, set number of bootstraps, default is 1000
-b MIN, MAX, set bounds for untilting, default is -10, 150
-fmt FMT, specify format - default is svg
-sav saves plots and quits
OUTPUT
Geographic: is an equal area projection of the input data in
original coordinates
Stratigraphic: is an equal area projection of the input data in
tilt adjusted coordinates
% Untilting: The dashed (red) curves are representative plots of
maximum eigenvalue (tau_1) as a function of untilting
The solid line is the cumulative distribution of the
% Untilting required to maximize tau for all the
bootstrapped data sets. The dashed vertical lines
are 95% confidence bounds on the % untilting that yields
the most clustered result (maximum tau_1).
Command line: prints out the bootstrapped iterations and
finally the confidence bounds on optimum untilting.
If the 95% conf bounds include 0, then a pre-tilt magnetization is indicated
If the 95% conf bounds include 100, then a post-tilt magnetization is indicated
If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is
possible as is vertical axis rotation or other pathologies
"""
kappa = 0
nb = 1000 # number of bootstraps
min, max = -10, 150
dir_path = '.'
infile, orfile = 'pmag_sites.txt', 'er_samples.txt'
critfile = 'pmag_criteria.txt'
dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction'
fmt = 'svg'
plot = 0
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-n' in sys.argv:
ind = sys.argv.index('-n')
nb = int(sys.argv[ind+1])
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if '-sav' in sys.argv:
plot = 1
if '-b' in sys.argv:
ind = sys.argv.index('-b')
min = int(sys.argv[ind+1])
max = int(sys.argv[ind+2])
if '-f' in sys.argv:
ind = sys.argv.index('-f')
infile = sys.argv[ind+1]
if '-fsa' in sys.argv:
ind = sys.argv.index('-fsa')
orfile = sys.argv[ind+1]
elif '-fsi' in sys.argv:
ind = sys.argv.index('-fsi')
orfile = sys.argv[ind+1]
dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction'
orfile = dir_path+'/'+orfile
infile = dir_path+'/'+infile
critfile = dir_path+'/'+critfile
data, file_type = pmag.magic_read(infile)
ordata, file_type = pmag.magic_read(orfile)
if '-exc' in sys.argv:
crits, file_type = pmag.magic_read(critfile)
for crit in crits:
if crit['pmag_criteria_code'] == "DE-SITE":
SiteCrit = crit
break
# get to work
#
PLTS = {'geo': 1, 'strat': 2, 'taus': 3} # make plot dictionary
pmagplotlib.plot_init(PLTS['geo'], 5, 5)
pmagplotlib.plot_init(PLTS['strat'], 5, 5)
pmagplotlib.plot_init(PLTS['taus'], 5, 5)
GEOrecs = pmag.get_dictitem(data, 'site_tilt_correction', '0', 'T')
if len(GEOrecs) > 0: # have some geographic data
DIDDs = [] # set up list for dec inc dip_direction, dip
for rec in GEOrecs: # parse data
dip, dip_dir = 0, -1
Dec = float(rec['site_dec'])
Inc = float(rec['site_inc'])
orecs = pmag.get_dictitem(
ordata, 'er_site_name', rec['er_site_name'], 'T')
if len(orecs) > 0:
if orecs[0][azkey] != "":
dip_dir = float(orecs[0][azkey])
if orecs[0][dipkey] != "":
dip = float(orecs[0][dipkey])
if dip != 0 and dip_dir != -1:
if '-exc' in sys.argv:
keep = 1
for key in list(SiteCrit.keys()):
if 'site' in key and SiteCrit[key] != "" and rec[key] != "" and key != 'site_alpha95':
if float(rec[key]) < float(SiteCrit[key]):
keep = 0
print(rec['er_site_name'], key, rec[key])
if key == 'site_alpha95' and SiteCrit[key] != "" and rec[key] != "":
if float(rec[key]) > float(SiteCrit[key]):
keep = 0
if keep == 1:
DIDDs.append([Dec, Inc, dip_dir, dip])
else:
DIDDs.append([Dec, Inc, dip_dir, dip])
else:
print('no geographic directional data found')
sys.exit()
pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic')
data = numpy.array(DIDDs)
D, I = pmag.dotilt_V(data)
TCs = numpy.array([D, I]).transpose()
pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic')
if not set_env.IS_WIN:
if plot == 0:
pmagplotlib.draw_figs(PLTS)
Percs = list(range(min, max))
Cdf, Untilt = [], []
pylab.figure(num=PLTS['taus'])
print('doing ', nb, ' iterations...please be patient.....')
for n in range(nb): # do bootstrap data sets - plot first 25 as dashed red line
if n % 50 == 0:
print(n)
Taus = [] # set up lists for taus
PDs = pmag.pseudo(DIDDs)
if kappa != 0:
for k in range(len(PDs)):
d, i = pmag.fshdev(kappa)
dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3])
PDs[k][2] = dipdir
PDs[k][3] = dip
for perc in Percs:
tilt = numpy.array([1., 1., 1., 0.01*perc])
D, I = pmag.dotilt_V(PDs*tilt)
TCs = numpy.array([D, I]).transpose()
ppars = pmag.doprinc(TCs) # get principal directions
Taus.append(ppars['tau1'])
if n < 25:
pylab.plot(Percs, Taus, 'r--')
# tilt that gives maximum tau
Untilt.append(Percs[Taus.index(numpy.max(Taus))])
Cdf.append(float(n) / float(nb))
pylab.plot(Percs, Taus, 'k')
pylab.xlabel('% Untilting')
pylab.ylabel('tau_1 (red), CDF (green)')
Untilt.sort() # now for CDF of tilt of maximum tau
pylab.plot(Untilt, Cdf, 'g')
lower = int(.025*nb)
upper = int(.975*nb)
pylab.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--')
pylab.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--')
tit = '%i - %i %s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding')
print(tit)
pylab.title(tit)
if plot == 0:
pmagplotlib.draw_figs(PLTS)
ans = input('S[a]ve all figures, <Return> to quit \n ')
if ans != 'a':
print("Good bye")
sys.exit()
files = {}
for key in list(PLTS.keys()):
files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt)
pmagplotlib.save_plots(PLTS, files)
if __name__ == "__main__":
main()
| bsd-3-clause |
w-k-jones/brownian | brownian_animation.py | 1 | 6448 | """
Created on Wed Jan 25 09:54:15 2017
@authors: William Jones & Luc Moseley
History:
25/01/2017: LM - Set up separate file with vars imported from System
08/02/2017: LM - Fixed bugs & fully assimilated with new code
07/03/2017: WJ - Running with periodic boundary conditions and initial flow
on Bernoulli tube.
16/03/2017: LM - Re-fixed new bugs & improved figure auto-scaling
Variable naming convention:
w_... Wall related variable (should probably make class)
n_... # of ...
p_... position of ...
v_... velocity of ...
..._n normal vector of ...
..._p parallel vector of ...
"""
"""
You cannot run this twice in the same shell window, because the original
step function updates global variables - when run again, this file simply
takes the global variables as they were at the end of the previous simulation.
"""
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from brownian_tools import *
import brownian_classes as brw
import brownian_system as brs
#set up Bernoulli tube system
in_co = np.array([[-0.5,0],
[0,0],
[0.4,0.3],
[0.6,0.3],
[1,0],
[1.5,0],
[1.5,1],
[1,1],
[0.6,0.7],
[0.4,0.7],
[0.,1],
[-0.5,1]])*10
"""
#co-ords for opposing sawtooth
in_co = np.array([[0,0],
[0.2,0.2],
[0.2,0],
[0.4,0.2],
[0.4,0],
[0.6,0.2],
[0.6,0],
[0.8,0.2],
[0.8,0],
[1,0.2],
[1,1],
[0.8,0.8],
[0.8,1],
[0.6,0.8],
[0.6,1],
[0.4,0.8],
[0.4,1],
[0.2,0.8],
[0.2,1],
[0,0.8]])
#box to test
in_co = np.array([[0,0],
[0,10],
[10,10],
[10,0]])
"""
"""
wal = brw.wall_shape(in_co)
wal.T[:] = 2.5
#Setting periodic boundaries
wal.pb_ind[5] = 11
#wal.pb_ind[11] = 5
bal = brw.balls(176,0.1,1,2,2.5,[2.0,0.],wal)
inst = brs.system(wal,bal)
"""
in_co = np.array([[0,1],
[0,0],
[2,1],
[2,0],
[4,1],
[4,0],
[6,1],
[6,0],
[8,1],
[8,0],
[10,1],
[10,2],
[0,2]])
wal = brw.wall_shape(in_co)
wal.T[:] = 2.5
wal.T[:11] = 5
wal.T[-1] = 5
wal.pb_ind[10] = 12
wal.pb_ind[12] = 10
bal = brw.balls(10,0.1,1,2,2.5,0.,wal)
inst = brs.system(wal,bal)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
xlim=(wal.xlim[0]-0.1*(wal.xlim[1]-wal.xlim[0]), wal.xlim[1]+0.1*(wal.xlim[1]-wal.xlim[0])),
ylim=(wal.ylim[0]-0.1*(wal.ylim[1]-wal.ylim[0]), wal.ylim[1]+0.3*(wal.ylim[1]-wal.ylim[0])))
# Enter the locations of the particles
particles, = ax.plot([], [], 'bo', ms=5.)
if len(bal.r2) > 1:
particles1, = ax.plot([], [], 'bo', ms=10.)
if len(bal.r2) > 2:
particles2, = ax.plot([], [], 'bo', ms=15.)
time_text = ax.text(0.025, 0.93, '', transform=ax.transAxes)
temp_text = ax.text(0.025, 0.88, '', transform=ax.transAxes)
energy_text = ax.text(0.025, 0.83, '', transform=ax.transAxes)
pressure_text = ax.text(0.025, 0.78, '', transform=ax.transAxes)
avg_press_text = ax.text(0.025, 0.73, '', transform=ax.transAxes)
#Make container boundary
ax.plot(wal.co_plt[:,0],wal.co_plt[:,1])
# Initialize animation
def init():
particles.set_data([], [])
if len(bal.r2) > 1:
particles1.set_data([], [])
if len(bal.r2) > 2:
particles2.set_data([], [])
time_text.set_text('')
energy_text.set_text('')
temp_text.set_text('')
pressure_text.set_text('')
avg_press_text.set_text('')
if len(bal.r2) == 1:
return particles, time_text, temp_text#, energy_text, pressure_text, avg_press_text
elif len(bal.r2) == 2:
return particles, particles1, time_text, temp_text#, energy_text, pressure_text, avg_press_text
else:
return particles, particles1, particles2, time_text, temp_text#, energy_text, pressure_text, avg_press_text
# Perform animation step
def animate(i):
global ax, fig #rect
#step function forward
#p, energy, pressure, avg_press = inst.step()
p, t, T = inst.step()
#set marker size based on ball size
ms = fig.dpi * fig.get_figwidth()/(ax.get_xlim()[1] - ax.get_xlim()[0]) * 2*bal.r2[0]
particles.set_markersize(ms)
particles.set_data(p[0:bal.n_balls[0], 0], p[0:bal.n_balls[0], 1])
if len(bal.r2) > 1:
ms1 = fig.dpi * fig.get_figwidth()/(ax.get_xlim()[1] - ax.get_xlim()[0]) * 2*bal.r2[1]
particles1.set_markersize(ms1)
particles1.set_markerfacecolor('r')
particles1.set_data(p[bal.n_balls[0]:sum(bal.n_balls[0:2]), 0], p[bal.n_balls[0]:sum(bal.n_balls[0:2]), 1])
if len(bal.r2) > 2:
ms2 = fig.dpi * fig.get_figwidth()/(ax.get_xlim()[1] - ax.get_xlim()[0]) * 2*bal.r2[2]
particles2.set_markersize(ms2)
particles2.set_data(p[sum(bal.n_balls[0:2]):sum(bal.n_balls[0:3]), 0], p[sum(bal.n_balls[0:2]):sum(bal.n_balls[0:3]), 1])
time_text.set_text('Time = %.1f ps' % t)
temp_text.set_text('Temperature = %i K' % (T*120.))
#pressure_text.set_text('Pressure = %.2f mPa' % (pressure*1000.))
#avg_press_text.set_text('Average Pressure = %.1f mPa' % (avg_press*1000.))
if len(bal.r2) == 1:
return particles, time_text, temp_text#, energy_text, pressure_text, avg_press_text #rect
elif len(bal.r2) == 2:
return particles, particles1, time_text, temp_text#, energy_text, pressure_text, avg_press_text #rect
else:
return particles, particles1, particles2, time_text, temp_text#, energy_text, pressure_text, avg_press_text #rect
ani = animation.FuncAnimation(fig, animate, frames=600, interval=10, blit=True, init_func=init)
plt.show()
| mit |
skavulya/spark-tk | regression-tests/generatedata/gmm_datagen.py | 14 | 1129 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Generates data for gmm model
params: n_samples: number of rows
centers: number of centroids
n_features: number of columns"""
from sklearn.datasets.samples_generator import make_blobs
def gen_data(n_rows, k, features):
x,y = make_blobs(n_samples=n_rows, centers=k, n_features=features, random_state=14)
for row in x.tolist():
print ",".join(map(str,row))
gen_data(50, 5, 2)
| apache-2.0 |
shahankhatch/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
miaecle/deepchem | contrib/DiabeticRetinopathy/data.py | 5 | 3607 | """
Diabetic Retinopathy Images loader.
"""
from __future__ import division
from __future__ import unicode_literals
import os
import logging
import deepchem
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def load_images_DR(split='random', seed=None):
""" Loader for DR images """
data_dir = deepchem.utils.get_data_dir()
images_path = os.path.join(data_dir, 'DR', 'train')
label_path = os.path.join(data_dir, 'DR', 'trainLabels.csv')
if not os.path.exists(images_path) or not os.path.exists(label_path):
logger.warn("Cannot locate data, \n\
all images(.png) should be stored in the folder: $DEEPCHEM_DATA_DIR/DR/train/,\n\
corresponding label file should be stored as $DEEPCHEM_DATA_DIR/DR/trainLabels.csv.\n\
Please refer to https://www.kaggle.com/c/diabetic-retinopathy-detection for data access"
)
image_names = os.listdir(images_path)
raw_images = []
for im in image_names:
if im.endswith('.jpeg') and not im.startswith(
'cut_') and not 'cut_' + im in image_names:
raw_images.append(im)
if len(raw_images) > 0:
cut_raw_images(raw_images, images_path)
image_names = [
p for p in os.listdir(images_path)
if p.startswith('cut_') and p.endswith('.png')
]
all_labels = dict(zip(*np.transpose(np.array(pd.read_csv(label_path)))))
print("Number of images: %d" % len(image_names))
labels = np.array(
[all_labels[os.path.splitext(n)[0][4:]] for n in image_names]).reshape(
(-1, 1))
image_full_paths = [os.path.join(images_path, n) for n in image_names]
classes, cts = np.unique(list(all_labels.values()), return_counts=True)
weight_ratio = dict(zip(classes, np.max(cts) / cts.astype(float)))
weights = np.array([weight_ratio[l[0]] for l in labels]).reshape((-1, 1))
loader = deepchem.data.ImageLoader()
dat = loader.featurize(
image_full_paths, labels=labels, weights=weights)
if split == None:
return dat
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter()
}
if not seed is None:
np.random.seed(seed)
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dat)
all_dataset = (train, valid, test)
return all_dataset
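# Hedged usage sketch (illustrative only; assumes the Kaggle DR images and
# labels are in place under $DEEPCHEM_DATA_DIR/DR as described above):
#   train, valid, test = load_images_DR(split='random', seed=123)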
def cut_raw_images(all_images, path):
"""Preprocess images:
(1) Crop the central square including retina
(2) Reduce resolution to 512 * 512
"""
print("Num of images to be processed: %d" % len(all_images))
try:
import cv2
except:
logger.warn("OpenCV required for image preprocessing")
return
for i, img_path in enumerate(all_images):
if i % 100 == 0:
print("on image %d" % i)
    if os.path.exists(os.path.join(path, 'cut_' + os.path.splitext(img_path)[0] + '.png')):
continue
img = cv2.imread(os.path.join(path, img_path))
edges = cv2.Canny(img, 10, 30)
    coords = list(zip(*np.where(edges > 0)))  # list() so len()/sort() also work on Python 3
n_p = len(coords)
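    # use the 1st/99th percentile edge coordinates (rather than min/max) to
    # estimate the retina centre robustly against isolated noisy edge pixels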
coords.sort(key=lambda x: (x[0], x[1]))
center_0 = int(
(coords[int(0.01 * n_p)][0] + coords[int(0.99 * n_p)][0]) / 2)
coords.sort(key=lambda x: (x[1], x[0]))
center_1 = int(
(coords[int(0.01 * n_p)][1] + coords[int(0.99 * n_p)][1]) / 2)
edge_size = min(
[center_0, img.shape[0] - center_0, center_1, img.shape[1] - center_1])
img_cut = img[(center_0 - edge_size):(center_0 + edge_size), (
center_1 - edge_size):(center_1 + edge_size)]
img_cut = cv2.resize(img_cut, (512, 512))
cv2.imwrite(
os.path.join(path, 'cut_' + os.path.splitext(img_path)[0] + '.png'),
img_cut)
| mit |
UDST/activitysim | activitysim/abm/models/atwork_subtour_scheduling.py | 2 | 3437 | # ActivitySim
# See full license in LICENSE.txt.
from __future__ import (absolute_import, division, print_function, )
from future.standard_library import install_aliases
install_aliases() # noqa: E402
import logging
import pandas as pd
from activitysim.core import simulate
from activitysim.core import tracing
from activitysim.core import pipeline
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import timetable as tt
from .util.vectorize_tour_scheduling import vectorize_subtour_scheduling
from .util.expressions import annotate_preprocessors
from activitysim.core.util import assign_in_place
logger = logging.getLogger(__name__)
DUMP = False
@inject.step()
def atwork_subtour_scheduling(
tours,
persons_merged,
tdd_alts,
skim_dict,
chunk_size,
trace_hh_id):
"""
This model predicts the departure time and duration of each activity for at work subtours tours
"""
trace_label = 'atwork_subtour_scheduling'
model_settings = config.read_model_settings('tour_scheduling_atwork.yaml')
model_spec = simulate.read_model_spec(file_name='tour_scheduling_atwork.csv')
persons_merged = persons_merged.to_frame()
tours = tours.to_frame()
subtours = tours[tours.tour_category == 'atwork']
# - if no atwork subtours
if subtours.shape[0] == 0:
tracing.no_results(trace_label)
return
logger.info("Running %s with %d tours", trace_label, len(subtours))
# preprocessor
constants = config.get_model_constants(model_settings)
od_skim_wrapper = skim_dict.wrap('origin', 'destination')
do_skim_wrapper = skim_dict.wrap('destination', 'origin')
skims = {
"od_skims": od_skim_wrapper,
"do_skims": do_skim_wrapper,
}
annotate_preprocessors(
subtours, constants, skims,
model_settings, trace_label)
# parent_tours table with columns ['tour_id', 'tdd'] index = tour_id
parent_tour_ids = subtours.parent_tour_id.astype(int).unique()
parent_tours = pd.DataFrame({'tour_id': parent_tour_ids}, index=parent_tour_ids)
parent_tours = parent_tours.merge(tours[['tdd']], left_index=True, right_index=True)
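    # the parent work tour's tdd is used to bound the time windows available
    # to its at-work subtours (each subtour must fit inside the parent tour)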
tdd_choices = vectorize_subtour_scheduling(
parent_tours,
subtours,
persons_merged,
tdd_alts, model_spec,
model_settings,
chunk_size=chunk_size,
trace_label=trace_label)
assign_in_place(tours, tdd_choices)
pipeline.replace_table("tours", tours)
if trace_hh_id:
tracing.trace_df(tours[tours.tour_category == 'atwork'],
label="atwork_subtour_scheduling",
slicer='person_id',
index_label='tour_id',
columns=None)
if DUMP:
subtours = tours[tours.tour_category == 'atwork']
parent_tours = tours[tours.index.isin(subtours.parent_tour_id)]
tracing.dump_df(DUMP, subtours, trace_label, 'sub_tours')
tracing.dump_df(DUMP, parent_tours, trace_label, 'parent_tours')
parent_tours['parent_tour_id'] = parent_tours.index
subtours = pd.concat([parent_tours, subtours])
tracing.dump_df(DUMP,
tt.tour_map(parent_tours, subtours, tdd_alts,
persons_id_col='parent_tour_id'),
trace_label, 'tour_map')
| bsd-3-clause |
tectronics/pygranule | pygranule/pyorbital_layer.py | 2 | 4246 | from .orbital_layer import OrbitalLayer, OrbitalLayerError
from datetime import datetime, timedelta
import numpy as np
import os
from pyorbital.geoloc_instrument_definitions import avhrr, viirs
from pyorbital.geoloc import compute_pixels, get_lonlatalt
#from urllib2 import URLError
from pyorbital.orbital import Orbital
class PyOrbitalLayer(OrbitalLayer):
"""
pyorbital based orbital layer
"""
__implements__ = (OrbitalLayer,)
def __init__(self, aoi, sat, instrument="AVHRR"):
OrbitalLayer.__init__(self,aoi,sat,instrument)
# instantiate orbital module
config_file_path = ""
try:
config_file_path = os.environ['PYGRANULE_CONFIG_PATH']
except KeyError:
print "pygranule config file path missing. Has the 'PYGRANULE_CONFIG_PATH' environment variable been set?"
default_tle_file = config_file_path+"/default.tle"
try:
self.orbital = Orbital(sat,default_tle_file)
except:
print "Failed to open default tle file:", default_tle_file
print "Downloading from internet:"
try:
self.orbital = Orbital(sat)
except:
raise OrbitalLayerError("Pyorbital Failed to fetch TLE from internet.")
# create scan geometry - one scan line.
if instrument == "AVHRR":
scan_steps = np.arange(0,self.instrument_info['scan_steps'],self.instrument_info['scan_steps']/8-1)
scan_steps[-1] = self.instrument_info['scan_steps']-1
self.scan_geom = avhrr(1,scan_steps)
elif instrument == "VIIRS":
self.scan_geom = viirs(1)
def set_tle(self, line1, line2):
# for now restart pyorbital with these new elements.
del self.orbital
self.orbital = Orbital(self.sat,line1=line1, line2=line2)
def orbital_period(self):
return 24*60/self.orbital.tle.mean_motion
def scan_line_lonlats(self, t):
"""
Returns a single instrument scan line starting at datetime t
"""
s_times = self.scan_geom.times(t)
pixels_pos = compute_pixels((self.orbital.tle.line1, self.orbital.tle.line2),
self.scan_geom, s_times)
pos_time = get_lonlatalt(pixels_pos, s_times)
return np.array((pos_time[0],pos_time[1]))
def next_transit(self, start=datetime.now(), resolution=100):
"""
Next transit time relative to center of aoi.
Resolution accuracy defined by subdivision of orbital period.
"""
        # accuracy in minutes from mean orbital period
dt = self.orbital_period()/resolution
# observer position
alt = 0.0
lon, lat = self.aoi_center()
# NOTE: For now I do not use the pyorbital
# get_next_passes. Because it accepts integer (not float) hours
# for the search period, and the accuracy of the transit time
# search is not obvious to me at the moment.
# Also I would like to allow negative transit time altitudes
# - transits below horizon.
# Therefore I re-implement the search myself, here to have more control:
t_offsets = np.arange(0.0,self.orbital_period()*1.2,dt)
e = np.array( [ self.orbital.get_observer_look(start + timedelta(minutes=t_offset), lon, lat, alt)[1] \
for t_offset in t_offsets ] )
# search for local maxima
b = (np.roll(e,1)<e)&(np.roll(e,-1)<e)
idx = np.where(b[1:]==True)[0][0]+1 #i.e. do not accept index 0 as maximum...
## return a quadratic maximum for good accuracy based on data.
## Quadratic fit to 3 points around maximum elevation
## seems to improve accuracy by 100-fold.
x,y = t_offsets[idx-1:idx+2],e[idx-1:idx+2]
fit = np.polyfit(x,y,2)
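        # refined transit time is the vertex of the fitted parabola
        # y = fit[0]*t**2 + fit[1]*t + fit[2], i.e. t = -fit[1]/(2*fit[0])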
t = -fit[1]/(2.0*fit[0])
#from matplotlib import pyplot as plt
#plt.plot(x,y,'x')
#fitx = np.arange(x[0]-1,x[2]+1,0.1)
#fity = fit[2]+fit[1]*fitx+fit[0]*(fitx**2)
#plt.plot(fitx,fity,'r-')
#plt.show()
return start + timedelta(minutes=t)
| gpl-3.0 |
charanpald/sandbox | sandbox/predictors/test/DecisionTreeTest.py | 1 | 5296 | import apgl
import unittest
import numpy
import logging
from sandbox.predictors.DecisionTree import DecisionTree
from sandbox.util.PathDefaults import PathDefaults
from sandbox.util.Evaluator import Evaluator
from sandbox.util.Sampling import Sampling
@apgl.skipIf(not apgl.checkImport('sklearn'), 'Module sklearn is required')
class DecisionTreeTestCase(unittest.TestCase):
def setUp(self):
numpy.random.seed(21)
numExamples = 200
numFeatures = 5
self.X = numpy.random.rand(numExamples, numFeatures)
c = numpy.random.rand(numFeatures)
self.y = numpy.sign(self.X.dot(c) - numpy.mean(self.X.dot(c)))
def testLearnModel(self):
decisionTree = DecisionTree()
decisionTree.learnModel(self.X, self.y)
tree = decisionTree.getClassifier()
def testPredict(self):
decisionTree = DecisionTree()
decisionTree.learnModel(self.X, self.y)
predY = decisionTree.predict(self.X)
inds = numpy.random.permutation(self.X.shape[0])
predY2 = decisionTree.predict(self.X[inds, :])
self.assertTrue((predY[inds] == predY2).all())
#Let's test on -1, +1 labels
y2 = (self.y*2)-1
decisionTree.learnModel(self.X, y2)
predY2 = decisionTree.predict(self.X)
self.assertTrue((predY2 == predY*2-1).all())
def testSetWeight(self):
decisionTree = DecisionTree()
decisionTree.setWeight(1.0)
decisionTree.learnModel(self.X, self.y)
predY = decisionTree.predict(self.X)
self.assertTrue((predY == numpy.ones(self.y.shape[0])).all())
decisionTree.setWeight(0.0)
decisionTree.learnModel(self.X, self.y)
predY = decisionTree.predict(self.X)
self.assertTrue((predY == numpy.zeros(self.y.shape[0])).all())
def testMinSplit(self):
decisionTree = DecisionTree()
decisionTree.setMinSplit(20)
decisionTree.learnModel(self.X, self.y)
size = decisionTree.getTree().node_count
#orngTree.printTree(decisionTree.getClassifier())
decisionTree.setMinSplit(1)
decisionTree.learnModel(self.X, self.y)
size2 = decisionTree.getTree().node_count
#orngTree.printTree(decisionTree.getClassifier())
self.assertTrue(size < size2)
def testGenerate(self):
generate = DecisionTree.generate(5, 50)
learner = generate()
learner.learnModel(self.X, self.y)
self.assertEquals(learner.getMaxDepth(), 5)
self.assertEquals(learner.getMinSplit(), 50)
@apgl.skip("")
    def testSetWeight2(self):
#Try weight = 0 and weight = 1
decisionTree = DecisionTree()
decisionTree.setWeight(0.0)
decisionTree.learnModel(self.X, self.y)
predY = decisionTree.predict(self.X)
self.assertTrue((predY == numpy.zeros(predY.shape[0])).all())
decisionTree.setWeight(1.0)
decisionTree.learnModel(self.X, self.y)
predY = decisionTree.predict(self.X)
self.assertTrue((predY == numpy.ones(predY.shape[0])).all())
def testPredict2(self):
#We play around with parameters to maximise AUC on the IGF1_0-Haar data
dataDir = PathDefaults.getDataDir()
fileName = dataDir + "IGF1_0-Haar.npy"
XY = numpy.load(fileName)
X = XY[:, 0:XY.shape[1]-1]
y = XY[:, XY.shape[1]-1].ravel()
weight = numpy.bincount(numpy.array(y, numpy.int))[0]/float(y.shape[0])
#weight = 0.5
#weight = 0.9
folds = 3
decisionTree = DecisionTree()
decisionTree.setWeight(weight)
decisionTree.setMaxDepth(50)
#decisionTree.setMinSplit(100)
mean, var = decisionTree.evaluateCv(X, y, folds, Evaluator.auc)
logging.debug("AUC = " + str(mean))
logging.debug("Var = " + str(var))
def testSetMaxDepth(self):
maxDepth = 20
decisionTree = DecisionTree()
decisionTree.setMaxDepth(maxDepth)
decisionTree.learnModel(self.X, self.y)
#self.assertTrue(DecisionTree.depth(decisionTree.getClassifier().tree) <= maxDepth+1)
maxDepth = 5
decisionTree = DecisionTree()
decisionTree.setMaxDepth(maxDepth)
decisionTree.learnModel(self.X, self.y)
#self.assertTrue(DecisionTree.depth(decisionTree.getClassifier().tree) <= maxDepth+1)
def testParallelVfcv(self):
folds = 3
idx = Sampling.crossValidation(folds, self.X.shape[0])
decisionTree = DecisionTree()
bestLearner, meanErrors = decisionTree.parallelVfcv(self.X, self.y, idx)
#print(meanErrors)
def testParallelPenaltyGrid(self):
folds = 3
idx = Sampling.crossValidation(folds, self.X.shape[0])
decisionTree = DecisionTree()
bestLearner, meanErrors = decisionTree.parallelVfcv(self.X, self.y, idx)
trainX = self.X[0:40, :]
trainY = self.y[0:40]
paramDict = {}
paramDict["setMinSplit"] = decisionTree.getMinSplits()
paramDict["setMaxDepth"] = decisionTree.getMaxDepths()
idealPenalties = decisionTree.parallelPenaltyGrid(trainX, trainY, self.X, self.y, paramDict)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
yousrabk/mne-python | examples/preprocessing/plot_find_ecg_artifacts.py | 19 | 1304 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
rhuelga/sms-tools | lectures/05-Sinusoidal-model/plots-code/spectral-peaks-interpolation.py | 2 | 1241 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = int(.8*fs)
hN = N//2
hM = (M+1)//2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
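# peakInterp refines the detected peak locations, magnitudes and phases by
# parabolic interpolation around each spectral peak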
pmag = mX[ploc]
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9.5, 5.5))
plt.subplot (2,1,1)
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([300,2500,-70,max(mX)])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + spectral peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis,pX,'c', lw=1.5)
plt.axis([300,2500,min(pX),-1])
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + spectral peaks')
plt.tight_layout()
plt.savefig('spectral-peaks-interpolation.png')
plt.show()
| agpl-3.0 |
UNH-CORE/RM2-CACTUS | scripts/jacobs-data.py | 2 | 5171 | #!/usr/bin/env python
"""Create hybrid foil coefficient dataset from Sheldahl and Jacobs data."""
from __future__ import division, print_function
import numpy as np
import pandas as pd
from scipy.stats import linregress
# Default CACTUS 0021 data has zero for all moment coeffs
zero_cms = True
# Whether or not to calculate lift slopes
calc_lift_slopes = False
# Whether or not to calculate LB DS model critical lift coefficients
calc_lb_crit_cls = False
# Whether or not to calculate BV DS model stall angles
calc_bv_stall_angles = False
# Offset for BV DS model stall angles versus default (deg)
bv_stall_angle_offset = 1.5
header = \
"""Title: NACA0021
Thickness to Chord Ratio: 0.21
Zero Lift AOA (deg): 0.0
Reverse Camber Direction: 0"""
subheader = \
"""Reynolds Number: {re}
BV Dyn. Stall Model - Positive Stall AOA (deg): {bv_pos_stall_angle}
BV Dyn. Stall Model - Negative Stall AOA (deg): {bv_nev_stall_angle}
LB Dyn. Stall Model - Lift Coeff. Slope at Zero Lift AOA (per radian): {lb_lift_coeff_slope}
LB Dyn. Stall Model - Positive Critical Lift Coeff.: {lb_pos_crit_cl}
LB Dyn. Stall Model - Negative Critical Lift Coeff.: {lb_neg_crit_cl}
AOA (deg) CL CD Cm25"""
re_list = ["8.3e4", "1.6e5", "3.8e5"]
# Default parameters from CACTUS input data (all but lift slopes are replaced)
bv_stall_angles = {"8.3e4": 4.0, "1.6e5": 5.0, "3.8e5": 5.0}
lb_lift_slopes = {"8.3e4": 5.277, "1.6e5": 5.371, "3.8e5": 6.303}
lb_crit_cls = {"8.3e4": 0.829, "1.6e5": 1.031, "3.8e5": 1.32}
# Manually add to BV stall angles
for re in bv_stall_angles:
bv_stall_angles[re] += bv_stall_angle_offset
def calc_lift_slope(df, alpha_max=9.0):
"""Calculate lift coefficient slope per unit radian using a linear
regression.
"""
df = df[df.alpha_deg >= 0]
df = df[df.alpha_deg <= alpha_max]
df["alpha_rad"] = np.deg2rad(df.alpha_deg)
slope, intercept, r_val, p_val, std_err = linregress(df.alpha_rad, df.cl)
return slope
def detect_ss_angle(df, threshold=0.03):
"""Detect static stall angle from input DataFrame by finding where the
change in `cd` per degree `alpha_deg` reaches `threshold`.
Should be run on the Sheldahl data since Jacobs does not contain `cd`.
"""
df["alpha"] = df.alpha_deg
df = df[df.alpha > 2]
df = df[df.alpha < 40]
dcd_dalpha = np.diff(df.cd) / np.diff(df.alpha)
i = np.where(dcd_dalpha >= threshold)[0][0]
return df.alpha.iloc[i]
def calc_crit_cl(df, re, fcrit=0.7, alpha1_fraction=0.87):
"""Calculate critical lift coefficient for Leishman--Beddoes model.
Code from turbinesFoam:
CN1_ = CNAlpha_*alpha1_*pow((1.0 + sqrt(f))/2.0, 2);
Technically this returns the critical normal force coefficient.
"""
df["alpha_rad"] = np.deg2rad(df.alpha_deg)
alpha1 = np.deg2rad(alpha1_fraction*detect_ss_angle(df))
# Use existing lift slopes
cn_alpha = lb_lift_slopes[re]
cn1 = cn_alpha*alpha1*((1.0 + np.sqrt(fcrit))/2.0)**2
return cn1
# Create empty dictionary for DataFrames
dfs = {}
# Load Jacobs data and mirror about zero angle of attack
for re in re_list:
df = pd.read_csv("config/foildata/NACA_0021_Jacobs_{}.csv".format(re))
df = df[df.alpha >= 0.0]
alpha = np.append(-np.flipud(df.alpha), df.alpha)
cl = np.append(-np.flipud(df.cl), df.cl)
df = pd.DataFrame()
df["alpha_deg"] = alpha
df["cl"] = cl
dfs[re] = df
# Fill in Jacobs C_d and C_m data from Sheldahl, interpolating to the Jacobs
# AoAs
for re in re_list:
df = dfs[re]
df_sh = pd.read_csv("config/foildata/NACA_0021_Sheldahl_{}.csv".format(re))
df["cd"] = np.interp(df.alpha_deg, df_sh.alpha_deg, df_sh.cd)
df["cm"] = np.interp(df.alpha_deg, df_sh.alpha_deg, df_sh.cm)
# Replace all Sheldahl data with Jacobs in its AoA range
df_sh_save_pos = df_sh[df_sh.alpha_deg > df.alpha_deg.max()]
df_sh_save_neg = df_sh[df_sh.alpha_deg < df.alpha_deg.min()]
df = df_sh_save_neg.append(df, ignore_index=True)
df = df.append(df_sh_save_pos, ignore_index=True)
dfs[re] = df
# Calculate lift slope
if calc_lift_slopes:
lb_lift_slopes[re] = calc_lift_slope(df)
# Calculate critical normal force coefficients and use as critical `cl`
if calc_lb_crit_cls:
lb_crit_cls[re] = calc_crit_cl(df_sh, re)
# Detect static stall angles for BV model
if calc_bv_stall_angles:
bv_stall_angles[re] = detect_ss_angle(df_sh)
# Write final text file in correct format
txt = header + "\n\n"
for re in re_list:
txt += subheader.format(re=re, bv_pos_stall_angle=bv_stall_angles[re],
bv_nev_stall_angle=bv_stall_angles[re],
lb_lift_coeff_slope=lb_lift_slopes[re],
lb_pos_crit_cl=lb_crit_cls[re],
lb_neg_crit_cl=lb_crit_cls[re]) + "\n"
df = dfs[re]
if zero_cms:
df.cm *= 0.0
for alpha_deg, cl, cd, cm in zip(df.alpha_deg, df.cl, df.cd, df.cm):
txt += str(alpha_deg) + "\t" + str(cl) + "\t" + str(cd) + "\t" + str(cm)
txt += "\n"
txt += "\n"
with open("config/foildata/NACA_0021_Jacobs.dat", "w") as f:
f.write(txt)
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_agg.py | 10 | 22144 | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG, optionally JPEG and TIFF
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* integrate screen dpi w/ ppi and text
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import threading
import numpy as np
from math import radians, cos, sin
from matplotlib import verbose, rcParams
from matplotlib.backend_bases import (RendererBase, FigureManagerBase,
FigureCanvasBase)
from matplotlib.cbook import is_string_like, maxdict, restrict_dict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, get_font
from matplotlib.ft2font import (LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING,
LOAD_DEFAULT, LOAD_NO_AUTOHINT)
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, BboxBase
from matplotlib import colors as mcolors
from matplotlib.backends._backend_agg import RendererAgg as _RendererAgg
from matplotlib import _png
try:
from PIL import Image
_has_pil = True
except ImportError:
_has_pil = False
backend_version = 'v2.2'
def get_hinting_flag():
mapping = {
True: LOAD_FORCE_AUTOHINT,
False: LOAD_NO_HINTING,
'either': LOAD_DEFAULT,
'native': LOAD_NO_AUTOHINT,
'auto': LOAD_FORCE_AUTOHINT,
'none': LOAD_NO_HINTING
}
return mapping[rcParams['text.hinting']]
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
debug=1
# we want to cache the fonts at the class level so that when
# multiple figures are created we can reuse them. This helps with
# a bug on windows where the creation of too many figures leads to
# too many open file handles. However, storing them at the class
# level is not thread safe. The solution here is to let the
# FigureCanvas acquire a lock on the fontd at the start of the
# draw, and release it when it is done. This allows multiple
# renderers to share the cached fonts, but only one figure can
# draw at at time and so the font cache is used by only one
# renderer at a time
lock = threading.RLock()
def __init__(self, width, height, dpi):
if __debug__: verbose.report('RendererAgg.__init__', 'debug-annoying')
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
if __debug__: verbose.report('RendererAgg.__init__ width=%s, height=%s'%(width, height), 'debug-annoying')
self._renderer = _RendererAgg(int(width), int(height), dpi, debug=False)
self._filter_renderers = []
if __debug__: verbose.report('RendererAgg.__init__ _RendererAgg done',
'debug-annoying')
self._update_methods()
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
if __debug__: verbose.report('RendererAgg.__init__ done',
'debug-annoying')
def __getstate__(self):
# We only want to preserve the init keywords of the Renderer.
# Anything else can be re-created.
return {'width': self.width, 'height': self.height, 'dpi': self.dpi}
def __setstate__(self, state):
self.__init__(state['width'], state['height'], state['dpi'])
def _get_hinting_flag(self):
if rcParams['text.hinting']:
return LOAD_FORCE_AUTOHINT
else:
return LOAD_NO_HINTING
# for filtering to work with rasterization, methods needs to be wrapped.
# maybe there is better way to do it.
def draw_markers(self, *kl, **kw):
return self._renderer.draw_markers(*kl, **kw)
def draw_path_collection(self, *kl, **kw):
return self._renderer.draw_path_collection(*kl, **kw)
def _update_methods(self):
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.draw_gouraud_triangle = self._renderer.draw_gouraud_triangle
self.draw_gouraud_triangles = self._renderer.draw_gouraud_triangles
self.draw_image = self._renderer.draw_image
self.copy_from_bbox = self._renderer.copy_from_bbox
self.get_content_extents = self._renderer.get_content_extents
def tostring_rgba_minimized(self):
extents = self.get_content_extents()
bbox = [[extents[0], self.height - (extents[1] + extents[3])],
[extents[0] + extents[2], self.height - extents[1]]]
region = self.copy_from_bbox(bbox)
return np.array(region), extents
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draw the path
"""
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if (nmax > 100 and npts > nmax and path.should_simplify and
rgbFace is None and gc.get_hatch() is None):
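            # split long unfilled, unhatched paths into chunks of at most
            # rcParams['agg.path.chunksize'] vertices so Agg never has to
            # process one very large path at once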
nch = np.ceil(npts/float(nmax))
chsize = int(np.ceil(npts/nch))
i0 = np.arange(0, npts, chsize)
i1 = np.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace)
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if __debug__: verbose.report('RendererAgg.draw_mathtext',
'debug-annoying')
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
xd = descent * sin(radians(angle))
yd = descent * cos(radians(angle))
x = np.round(x + ox + xd)
y = np.round(y - oy + yd)
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Render the text
"""
if __debug__: verbose.report('RendererAgg.draw_text', 'debug-annoying')
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
flags = get_hinting_flag()
font = self._get_agg_font(prop)
if font is None: return None
if len(s) == 1 and ord(s) > 127:
font.load_char(ord(s), flags=flags)
else:
# We pass '0' for angle here, since it will be rotated (in raster
# space) in the following call to draw_text_image).
font.set_text(s, 0, flags=flags)
font.draw_glyphs_to_bitmap(antialiased=rcParams['text.antialiased'])
d = font.get_descent() / 64.0
# The descent needs to be adjusted for the angle
xo, yo = font.get_bitmap_offset()
xo /= 64.0
yo /= 64.0
xd = -d * sin(radians(angle))
yd = d * cos(radians(angle))
#print x, y, int(x), int(y), s
self._renderer.draw_text_image(
font, np.round(x - xd + xo), np.round(y + yd + yo) + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
# passing rgb is a little hack to make caching in the
# texmanager more efficient. It is not meant to be used
# outside the backend
"""
if rcParams['text.usetex']:
# todo: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
flags = get_hinting_flag()
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=flags) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
Z = np.array(Z * 255.0, np.uint8)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
xd = d * sin(radians(angle))
yd = d * cos(radians(angle))
x = np.round(x + xd)
y = np.round(y + yd)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def _get_agg_font(self, prop):
"""
Get the font for text instance t, cacheing for efficiency
"""
if __debug__: verbose.report('RendererAgg._get_agg_font',
'debug-annoying')
fname = findfont(prop)
font = get_font(
fname,
hinting_factor=rcParams['text.hinting_factor'])
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
"""
convert point measures to pixes using dpi and the pixels per
inch of the display
"""
if __debug__: verbose.report('RendererAgg.points_to_pixels',
'debug-annoying')
return points*self.dpi/72.0
def tostring_rgb(self):
if __debug__: verbose.report('RendererAgg.tostring_rgb',
'debug-annoying')
return self._renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('RendererAgg.tostring_argb',
'debug-annoying')
return self._renderer.tostring_argb()
def buffer_rgba(self):
if __debug__: verbose.report('RendererAgg.buffer_rgba',
'debug-annoying')
return self._renderer.buffer_rgba()
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def option_scale_image(self):
"""
        agg backend doesn't support arbitrary scaling of images.
"""
return False
def restore_region(self, region, bbox=None, xy=None):
"""
Restore the saved region. If bbox (instance of BboxBase, or
its extents) is given, only the region specified by the bbox
        will be restored. *xy* (a tuple of two floats) optionally
specifies the new position (the LLC of the original region,
not the LLC of the bbox) where the region will be restored.
>>> region = renderer.copy_from_bbox()
>>> x1, y1, x2, y2 = region.get_extents()
>>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
... xy=(x1-dx, y1))
"""
if bbox is not None or xy is not None:
if bbox is None:
x1, y1, x2, y2 = region.get_extents()
elif isinstance(bbox, BboxBase):
x1, y1, x2, y2 = bbox.extents
else:
x1, y1, x2, y2 = bbox
if xy is None:
ox, oy = x1, y1
else:
ox, oy = xy
# The incoming data is float, but the _renderer type-checking wants
# to see integers.
self._renderer.restore_region(region, int(x1), int(y1),
int(x2), int(y2), int(ox), int(oy))
else:
self._renderer.restore_region(region)
def start_filter(self):
"""
        Start filtering. It simply creates a new canvas (the old one is saved).
"""
self._filter_renderers.append(self._renderer)
self._renderer = _RendererAgg(int(self.width), int(self.height),
self.dpi)
self._update_methods()
def stop_filter(self, post_processing):
"""
        Save the plot in the current canvas as an image and apply
the *post_processing* function.
def post_processing(image, dpi):
# ny, nx, depth = image.shape
# image (numpy array) has RGBA channels and has a depth of 4.
...
# create a new_image (numpy array of 4 channels, size can be
# different). The resulting image may have offsets from
# lower-left corner of the original image
return new_image, offset_x, offset_y
The saved renderer is restored and the returned image from
post_processing is plotted (using draw_image) on it.
"""
# WARNING.
        # For agg_filter to work, the renderer's methods need to be
        # overridden in the class. See draw_markers and draw_path_collections.
width, height = int(self.width), int(self.height)
buffer, bounds = self.tostring_rgba_minimized()
l, b, w, h = bounds
self._renderer = self._filter_renderers.pop()
self._update_methods()
if w > 0 and h > 0:
img = np.fromstring(buffer, np.uint8)
img, ox, oy = post_processing(img.reshape((h, w, 4)) / 255.,
self.dpi)
gc = self.new_gc()
if img.dtype.kind == 'f':
img = np.asarray(img * 255., np.uint8)
img = img[::-1]
self._renderer.draw_image(
gc, l + ox, height - b - h + oy, img)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasAgg(figure)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region, bbox=None, xy=None):
renderer = self.get_renderer()
return renderer.restore_region(region, bbox, xy)
def draw(self):
"""
Draw the figure using the renderer
"""
if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying')
self.renderer = self.get_renderer(cleared=True)
# acquire a lock on the shared font cache
RendererAgg.lock.acquire()
try:
self.figure.draw(self.renderer)
finally:
RendererAgg.lock.release()
def get_renderer(self, cleared=False):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try: self._lastKey, self.renderer
except AttributeError: need_new_renderer = True
else: need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
elif cleared:
self.renderer.clear()
return self.renderer
def tostring_rgb(self):
'''Get the image as an RGB byte string
`draw` must be called at least once before this function will work and
to update the renderer for any subsequent changes to the Figure.
Returns
-------
bytes
'''
if __debug__: verbose.report('FigureCanvasAgg.tostring_rgb',
'debug-annoying')
return self.renderer.tostring_rgb()
def tostring_argb(self):
'''Get the image as an ARGB byte string
`draw` must be called at least once before this function will work and
to update the renderer for any subsequent changes to the Figure.
Returns
-------
bytes
'''
if __debug__: verbose.report('FigureCanvasAgg.tostring_argb',
'debug-annoying')
return self.renderer.tostring_argb()
def buffer_rgba(self):
'''Get the image as an RGBA byte string
`draw` must be called at least once before this function will work and
to update the renderer for any subsequent changes to the Figure.
Returns
-------
bytes
'''
if __debug__: verbose.report('FigureCanvasAgg.buffer_rgba',
'debug-annoying')
return self.renderer.buffer_rgba()
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
fileobj = open(filename_or_obj, 'wb')
close = True
else:
fileobj = filename_or_obj
close = False
try:
fileobj.write(renderer._renderer.buffer_rgba())
finally:
if close:
fileobj.close()
renderer.dpi = original_dpi
print_rgba = print_raw
def print_png(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = open(filename_or_obj, 'wb')
close = True
else:
close = False
try:
_png.write_png(renderer._renderer, filename_or_obj, self.figure.dpi)
finally:
if close:
filename_or_obj.close()
renderer.dpi = original_dpi
def print_to_buffer(self):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
try:
result = (renderer._renderer.buffer_rgba(),
(int(renderer.width), int(renderer.height)))
finally:
renderer.dpi = original_dpi
return result
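    # Illustrative sketch (added comment, not part of the original source):
    # the buffer returned above can be viewed as an RGBA pixel array, e.g.
    #
    #     buf, (w, h) = canvas.print_to_buffer()
    #     rgba = np.frombuffer(buf, dtype=np.uint8).reshape((h, w, 4))
    #
    # where ``canvas`` is assumed to be a FigureCanvasAgg instance.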
if _has_pil:
# add JPEG support
def print_jpg(self, filename_or_obj, *args, **kwargs):
"""
Supported kwargs:
*quality*: The image quality, on a scale from 1 (worst) to
95 (best). The default is 95, if not given in the
matplotlibrc file in the savefig.jpeg_quality parameter.
Values above 95 should be avoided; 100 completely
disables the JPEG quantization stage.
*optimize*: If present, indicates that the encoder should
make an extra pass over the image in order to select
optimal encoder settings.
*progressive*: If present, indicates that this image
should be stored as a progressive JPEG file.
"""
buf, size = self.print_to_buffer()
if kwargs.pop("dryrun", False):
return
# The image is "pasted" onto a white background image to safely
# handle any transparency
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
color = mcolors.colorConverter.to_rgb(
rcParams.get('savefig.facecolor', 'white'))
color = tuple([int(x * 255.0) for x in color])
background = Image.new('RGB', size, color)
background.paste(image, image)
options = restrict_dict(kwargs, ['quality', 'optimize',
'progressive'])
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
return background.save(filename_or_obj, format='jpeg', **options)
print_jpeg = print_jpg
# add TIFF support
def print_tif(self, filename_or_obj, *args, **kwargs):
buf, size = self.print_to_buffer()
if kwargs.pop("dryrun", False):
return
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
dpi = (self.figure.dpi, self.figure.dpi)
return image.save(filename_or_obj, format='tiff',
dpi=dpi)
print_tiff = print_tif
FigureCanvas = FigureCanvasAgg
| gpl-3.0 |
WladimirSidorenko/DiscourseSegmenter | dsegmenter/bparseg/bparsegmenter.py | 1 | 14522 | #!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
##################################################################
# Documentation
"""Module providing discourse segmenter for constituency trees.
Attributes:
tree2tok (method): create dictionary mapping constituency trees to numbered
tokens
read_trees (method): read file and return a list of constituent dictionaries
trees2segs (method): align trees with corresponding segments
featgen (method): default feature generation function
classify (method): default classification method
.. moduleauthor:: Wladimir Sidorenko (Uladzimir Sidarenka)
"""
##################################################################
# Libraries
from __future__ import absolute_import, unicode_literals
from dsegmenter.common import NONE, prune_punc, score_substitute, \
translate_toks
from dsegmenter.bparseg.align import nw_align
from dsegmenter.bparseg.constituency_tree import Tree, CTree
from dsegmenter.treeseg import TreeSegmenter, DiscourseSegment, \
CONSTITUENCY, DEFAULT_SEGMENT
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import precision_recall_fscore_support
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
import locale
import os
import re
##################################################################
# Constants
locale.setlocale(locale.LC_ALL, "")
N_FOLDS = 10
ESCAPE_QUOTE_RE = re.compile(r"\\+([\"'])")
ESCAPE_SLASH_RE = re.compile(r"\\/")
##################################################################
# Methods
def tree2tok(a_tree, a_start=0):
"""Create dictionary mapping constituency trees to numbered tokens.
Args:
a_tree (constituency_tree.Tree): tree to analyze
a_start (int): starting position of the first token
Returns:
(dict) mapping from subtrees to their yields
"""
rset = set()
chset = None
tr2tk = {(a_start, a_tree.label()): (a_tree, rset)}
i = a_start
for child in a_tree:
if isinstance(child, Tree):
tr2tk.update(tree2tok(child, i))
chset = tr2tk[(i, child.label())][-1]
i += len(chset)
rset.update(chset)
else:
rset.add((i, child))
i += 1
return tr2tk
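# Illustrative sketch (added comment; it assumes the constituency ``Tree``
# class accepts the NLTK-style ``Tree(label, children)`` constructor, which is
# an assumption, not something guaranteed by this module):
#
# >>> t = Tree("S", [Tree("NP", ["ich"]), Tree("VP", ["gehe"])])
# >>> mapping = tree2tok(t)
# >>> sorted(mapping)  # -> [(0, 'NP'), (0, 'S'), (1, 'VP')]
# >>> sorted(mapping[(0, 'S')][-1])  # -> [(0, 'ich'), (1, 'gehe')]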
def read_tok_trees(a_lines, a_one_per_line=False):
"""Read file and return a list of constituent dictionaries.
Args:
a_lines (list[str]): decoded lines of the input file
a_one_per_line (bool): boolean flag indicating whether each
tree is stored on a separate line
Returns:
        2-tuple: a dictionary mapping token sets to trees and the list of parsed trees
"""
ctrees = CTree.parse_lines(a_lines, a_one_per_line=a_one_per_line)
# generate dictionaries mapping trees' yields to trees
t_cnt = 0
t2t = None
trees2toks = dict()
for ctree in ctrees:
t2t = tree2tok(ctree, t_cnt)
trees2toks.update(t2t)
t_cnt += len(t2t[(t_cnt, ctree.label())][-1])
toks2trees = dict()
for ((tree_c, tree_lbl), (tree, toks)) in trees2toks.iteritems():
toks = frozenset(toks)
if toks in toks2trees:
toks2trees[toks].append(tree)
else:
toks2trees[toks] = [tree]
return toks2trees, ctrees
def read_trees(a_lines, a_one_per_line=False):
"""Read file and return a list of constituent dictionaries.
Args:
a_lines (list[str]): decoded lines of the input file
a_one_per_line (bool): boolean flag indicating whether each
tree is stored on a separate line
Yields:
CTree: input tree
"""
for ctree in CTree.parse_lines(a_lines, a_one_per_line=a_one_per_line):
yield ctree
def trees2segs(a_toks2trees, a_toks2segs):
"""Align trees with corresponding segments.
Args:
a_toks2trees (dict): mapping from tokens to trees
a_toks2segs (dict): mapping from tokens to segments
Returns:
dict: mapping from trees to segments
"""
# prune empty trees and their corresponding segments
tree2seg = {t: None
for val in a_toks2trees.itervalues()
for t in val}
# add additional keys to `a_toks2trees` by pruning punctuation marks from
# existing trees
pruned_toks = None
tree_tok_keys = a_toks2trees.keys()
for tree_toks in tree_tok_keys:
pruned_toks = prune_punc(tree_toks)
if pruned_toks not in a_toks2trees:
a_toks2trees[pruned_toks] = a_toks2trees[tree_toks]
# establish a mapping between tree tokens and segment tokens
tree_toks = list(set([t
for t_set in a_toks2trees.iterkeys()
for t in t_set]))
tree_toks.sort(key=lambda el: el[0])
seg_toks = list(set([t
for t_set in a_toks2segs.iterkeys()
for t in t_set]))
seg_toks.sort(key=lambda el: el[0])
# align tokens if necessary
seg_t2tree_t = None
if tree_toks != seg_toks:
seg_t2tree_t = dict()
alignment = nw_align(seg_toks, tree_toks,
substitute=score_substitute,
keep_deleted=True)
for i, tt in enumerate(alignment):
seg_t2tree_t[seg_toks[i]] = [tree_toks[j] for j in tt]
# for each segment look if its corresponding token set is matched by
# any other subtree
translated_toks = None
for toks, segs in a_toks2segs.iteritems():
translated_toks = translate_toks(toks, seg_t2tree_t)
key = None
if translated_toks in a_toks2trees:
key = translated_toks
else:
translated_toks = prune_punc(translated_toks)
if translated_toks in a_toks2trees:
key = translated_toks
if key:
for tree in a_toks2trees[key]:
# if tree2seg[tree] is not None:
# continue
assert tree2seg[tree] is None, \
"Multiple segments found for tree" + repr(tree) + ": " + \
repr(segs[-1]) + "; " + repr(tree2seg[tree])
tree2seg[tree] = segs[-1]
return tree2seg
def featgen(a_tree):
"""Generate features for the given BitPar tree.
Args:
a_tree (dsegmenter.bparseg.constituency_tree.CTree): BitPar tree
for which we should generate features
Returns:
list: string features
"""
assert a_tree.leaves(), "Tree does not contain leaves."
# add unigram features
ret = {u"tok_{:s}".format(token.lower()): 1 for token in a_tree.leaves()}
# add very first and very last tokens of the tree
ret[u"tokFirst_{:s}".format(a_tree.leaves()[0].lower())] = 1
ret[u"tokLast_{:s}".format(a_tree.leaves()[-1].lower())] = 1
sublabels = [st.label() for st in a_tree.subtrees()]
if sublabels:
ret[u"lblFirst_{:s}".format(sublabels[0].lower())] = 1
ret[u"lblLast_{:s}".format(sublabels[-1].lower())] = 1
# add tree label
ret[u"lbl_{:s}".format(a_tree.label())] = 1
# add label of the parent tree
ret[u"prntLbl_{:s}".format(a_tree.prnt_label())] = 1
# add first and last word of the parent tree
if a_tree.parent():
prnt_tree = a_tree.parent()
t_idx = a_tree.parent_index()
ret[u"treeIdx"] = t_idx
if t_idx > 0:
prev_tree = prnt_tree[t_idx - 1]
ret[u"prevLbl_{:s}".format(prev_tree.label())] = 1
ret[u"prevTokFrst_{:s}".format(prev_tree.leaves()[0].lower())] = 1
ret[u"prevTokLst_{:s}".format(prev_tree.leaves()[-1].lower())] = 1
if t_idx + 1 < len(prnt_tree):
nxt_tree = prnt_tree[t_idx + 1]
ret[u"nxtLbl_{:s}".format(nxt_tree.label())] = 1
ret[u"pxtTokFrst_{:s}".format(nxt_tree.leaves()[0].lower())] = 1
ret[u"pxtTokLst_{:s}".format(nxt_tree.leaves()[-1].lower())] = 1
# add tree height
ret["height"] = a_tree.height()
# add label of the parent tree
return ret
def classify(a_classifier, a_featgen, a_el, a_default=None):
"""Classify given element.
Args:
a_classifier - model which should make predictions
a_featgen - feature generation function
a_el - constituency tree to be classified
        a_default - default element that should be returned if the tree
                    does not yield a segment
Returns:
str: assigned class
"""
prediction = a_classifier.predict(a_featgen(a_el))[0]
return a_default if prediction is None or \
prediction == NONE else prediction
##################################################################
# Class
class BparSegmenter(object):
"""Class for perfoming discourse segmentation on constituency trees.
"""
#:classifier object: default classification method
DEFAULT_CLASSIFIER = LinearSVC(C=0.3, multi_class='crammer_singer')
#:str: path to default model to use in classification
DEFAULT_MODEL = os.path.join(os.path.dirname(__file__), "data",
"bpar.model")
#:pipeline object: default pipeline object used for classification
DEFAULT_PIPELINE = Pipeline([('vectorizer', DictVectorizer()),
('var_filter', VarianceThreshold()),
('LinearSVC', DEFAULT_CLASSIFIER)])
def __init__(self, a_featgen=featgen, a_classify=classify,
a_model=DEFAULT_MODEL):
"""Class constructor.
Args:
a_featgen (method): function to be used for feature generation
        a_classify (method): pointer to a function which predicts the segment
class for BitPar tree based on the model and
features generated for that tree
a_model (str): path to a pre-trained model (previously dumped by
joblib) or valid classification object or None
"""
self.featgen = a_featgen
self.classify = a_classify
self._update_segmenter(a_model)
def segment(self, a_trees):
"""Create discourse segments based on the BitPar trees.
Args:
a_trees (list): list of sentence trees to be parsed
Returns:
list: constructed segment trees
"""
seg_idx = 0
segments = []
isegment = None
if self.model is None:
return [DiscourseSegment(a_name=DEFAULT_SEGMENT, a_leaves=t.leaves)
for t in a_trees]
for t in a_trees:
self._segmenter.segment(t, segments)
# if classifier failed to create one common segment for
# the whole tree, create one for it
if (len(segments) - seg_idx) > 1 or \
(len(segments) and not isinstance(segments[-1][-1],
DiscourseSegment)):
isegment = DiscourseSegment(a_name=DEFAULT_SEGMENT,
a_leaves=segments[seg_idx:])
segments[seg_idx:] = [(isegment.leaves[0][0], isegment)]
seg_idx = len(segments)
return segments
def train(self, a_trees, a_segs, a_path):
"""Train segmenter model.
Args:
a_trees (list): BitPar trees
a_segs (list): discourse segments
a_path (str): path to file in which the trained model should be
stored
Returns:
void:
"""
# drop current model
self._update_segmenter(self.DEFAULT_PIPELINE)
# generate features
feats = [self.featgen(t) for t in a_trees]
a_segs = [str(s) for s in a_segs]
# train classifier
self._train(feats, a_segs, self.model)
# store the model to file
joblib.dump(self.model, a_path)
def test(self, a_trees, a_segments):
"""Estimate performance of segmenter model.
Args:
a_trees (list): BitPar trees
a_segments (list): corresponding gold segments for trees
Returns:
2-tuple: macro and micro-averaged F-scores
"""
if self.model is None:
return (0, 0)
segments = [self.model.predict(self.featgen(itree))[0]
for itree in a_trees]
a_segments = [str(s) for s in a_segments]
_, _, macro_f1, _ = precision_recall_fscore_support(a_segments,
segments,
average='macro',
warn_for=())
_, _, micro_f1, _ = precision_recall_fscore_support(a_segments,
segments,
average='micro',
warn_for=())
return (macro_f1, micro_f1)
def _train(self, a_feats, a_segs, a_model):
"""Train segmenter model.
Args:
a_feats (list): BitPar featuress
a_segs (list): discourse segments
a_model: model object whose parameters should be fit
Returns:
void:
"""
# train classifier
a_model.fit(a_feats, a_segs)
self._update_segmenter(a_model)
def _update_segmenter(self, a_model):
"""Update model, decision function, and internal segmenter.
Args:
a_model: model used by classifier
Returns:
void:
"""
if a_model is None:
self.model = a_model
self.decfunc = lambda el: None
self._segmenter = TreeSegmenter(a_decfunc=self.decfunc,
a_type=CONSTITUENCY)
return
elif isinstance(a_model, str):
if not os.path.isfile(a_model) or not os.access(a_model, os.R_OK):
raise RuntimeError("Can't create model from"
" file {:s}".format(a_model))
self.model = joblib.load(a_model)
else:
self.model = a_model
self.decfunc = lambda el: self.classify(self.model, self.featgen, el)
self._segmenter = TreeSegmenter(a_decfunc=self.decfunc,
a_type=CONSTITUENCY)
| mit |
nelson-liu/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e., balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient; the segmentation is then close to a Voronoi partition.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
larsoner/mne-python | tutorials/misc/plot_ecog.py | 4 | 7882 | """
.. _tut_working_with_ecog:
======================
Working with ECoG data
======================
MNE supports working with more than just MEG and EEG data. Here we show some
of the functions that can be used to facilitate working with
electrocorticography (ECoG) data.
This example shows how to use:
- ECoG data
- channel locations in subject's MRI space
- projection onto a surface
For an example that involves sEEG data, channel locations in
MNI space, or projection into a volume, see :ref:`tut_working_with_seeg`.
"""
# Authors: Eric Larson <[email protected]>
# Chris Holdgraf <[email protected]>
# Adam Li <[email protected]>
#
# License: BSD (3-clause)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mne
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
# paths to mne datasets - sample ECoG and FreeSurfer subject
misc_path = mne.datasets.misc.data_path()
sample_path = mne.datasets.sample.data_path()
subject = 'sample'
subjects_dir = sample_path + '/subjects'
###############################################################################
# Let's load some ECoG electrode locations and names, and turn them into
# a :class:`mne.channels.DigMontage` class. First, use pandas to read in the
# ``.tsv`` file.
# In this tutorial, the electrode coordinates are assumed to be in meters
elec_df = pd.read_csv(misc_path + '/ecog/sample_ecog_electrodes.tsv',
sep='\t', header=0, index_col=None)
ch_names = elec_df['name'].tolist()
ch_coords = elec_df[['x', 'y', 'z']].to_numpy(dtype=float)
ch_pos = dict(zip(ch_names, ch_coords))
# Ideally the nasion/LPA/RPA will also be present from the digitization, here
# we use fiducials estimated from the subject's FreeSurfer MNI transformation:
lpa, nasion, rpa = mne.coreg.get_mni_fiducials(
subject, subjects_dir=subjects_dir)
lpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r']
###############################################################################
# Now we make a :class:`mne.channels.DigMontage` stating that the ECoG
# contacts are in the FreeSurfer surface RAS (i.e., MRI) coordinate system.
montage = mne.channels.make_dig_montage(
ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa)
print('Created %s channel positions' % len(ch_names))
###############################################################################
# Now we get the :term:`trans` that transforms from our MRI coordinate system
# to the head coordinate frame. This transform will be applied to the
# data when applying the montage so that standard plotting functions like
# :func:`mne.viz.plot_evoked_topomap` will be aligned properly.
trans = mne.channels.compute_native_head_t(montage)
print(trans)
###############################################################################
# Now that we have our montage, we can load in our corresponding
# time-series data and set the montage to the raw data.
# first we'll load in the sample dataset
raw = mne.io.read_raw_edf(misc_path + '/ecog/sample_ecog.edf')
# drop bad channels
raw.info['bads'].extend([ch for ch in raw.ch_names if ch not in ch_names])
raw.load_data()
raw.drop_channels(raw.info['bads'])
raw.crop(0, 2) # just process 2 sec of data for speed
# attach montage
raw.set_montage(montage)
# set channel types to ECoG (instead of EEG)
raw.set_channel_types({ch_name: 'ecog' for ch_name in raw.ch_names})
###############################################################################
# We can then plot the locations of our electrodes on our subject's brain.
# We'll use :func:`~mne.viz.snapshot_brain_montage` to save the plot as image
# data (along with xy positions of each electrode in the image), so that later
# we can plot frequency band power on top of it.
#
# .. note:: These are not real electrodes for this subject, so they
# do not align to the cortical surface perfectly.
fig = plot_alignment(raw.info, subject=subject, subjects_dir=subjects_dir,
surfaces=['pial'], trans=trans, coord_frame='mri')
mne.viz.set_3d_view(fig, 200, 70, focalpoint=[0, -0.005, 0.03])
xy, im = snapshot_brain_montage(fig, montage)
###############################################################################
# Next, we'll compute the signal power in the gamma (30-90 Hz) and alpha
# (8-12 Hz) bands.
gamma_power_t = raw.copy().filter(30, 90).apply_hilbert(
envelope=True).get_data()
alpha_power_t = raw.copy().filter(8, 12).apply_hilbert(
envelope=True).get_data()
gamma_power = gamma_power_t.mean(axis=-1)
alpha_power = alpha_power_t.mean(axis=-1)
###############################################################################
# Now let's use matplotlib to overplot frequency band power onto the electrodes
# which can be plotted on top of the brain from
# :func:`~mne.viz.snapshot_brain_montage`.
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in raw.info['ch_names']])
# colormap to view spectral power
cmap = 'viridis'
# Create a 1x2 figure showing the average power in gamma and alpha bands.
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
# choose a colormap range wide enough for both frequency bands
_gamma_alpha_power = np.concatenate((gamma_power, alpha_power)).flatten()
vmin, vmax = np.percentile(_gamma_alpha_power, [10, 90])
for ax, band_power, band in zip(axs,
[gamma_power, alpha_power],
['Gamma', 'Alpha']):
ax.imshow(im)
ax.set_axis_off()
sc = ax.scatter(*xy_pts.T, c=band_power, s=200,
cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_title(f'{band} band power', size='x-large')
fig.colorbar(sc, ax=axs)
###############################################################################
# Say we want to visualize the evolution of the power in the gamma band,
# instead of just plotting the average. We can use
# `matplotlib.animation.FuncAnimation` to create an animation and apply this
# to the brain figure.
# create an initialization and animation function
# to pass to FuncAnimation
def init():
"""Create an empty frame."""
return paths,
def animate(i, activity):
"""Animate the plot."""
paths.set_array(activity[:, i])
return paths,
# create the figure and apply the animation of the
# gamma frequency band activity
fig, ax = plt.subplots(figsize=(5, 5))
ax.imshow(im)
ax.set_axis_off()
paths = ax.scatter(*xy_pts.T, c=np.zeros(len(xy_pts)), s=200,
cmap=cmap, vmin=vmin, vmax=vmax)
fig.colorbar(paths, ax=ax)
ax.set_title('Gamma frequency over time (Hilbert transform)',
size='large')
# avoid edge artifacts and decimate, showing just a short chunk
sl = slice(100, 150)
show_power = gamma_power_t[:, sl]
anim = animation.FuncAnimation(fig, animate, init_func=init,
fargs=(show_power,),
frames=show_power.shape[1],
interval=100, blit=True)
###############################################################################
# Alternatively, we can project the sensor data to the nearest locations on
# the pial surface and visualize that:
# sphinx_gallery_thumbnail_number = 4
evoked = mne.EvokedArray(
gamma_power_t[:, sl], raw.info, tmin=raw.times[sl][0])
stc = mne.stc_near_sensors(evoked, trans, subject, subjects_dir=subjects_dir)
clim = dict(kind='value', lims=[vmin * 0.9, vmin, vmax])
brain = stc.plot(surface='pial', hemi='both', initial_time=0.68,
colormap='viridis', clim=clim, views='parietal',
subjects_dir=subjects_dir, size=(500, 500))
# You can save a movie like the one on our documentation website with:
# brain.save_movie(time_dilation=50, interpolation='linear', framerate=10,
# time_viewer=True)
| bsd-3-clause |
xubenben/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
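# Illustrative usage sketch (added for clarity, not part of the original
# benchmark): factorizing a small random non-negative matrix.
#
# >>> rng = np.random.RandomState(0)
# >>> V = np.abs(rng.standard_normal((20, 10)))
# >>> W, H = alt_nnmf(V, r=3, tol=1e-3, R=rng)
# >>> W.shape, H.shape  # -> ((20, 3), (3, 10))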
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
dimkal/mne-python | mne/tests/test_dipole.py | 6 | 8269 | import os.path as op
import numpy as np
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_allclose
import warnings
from mne import (read_dipole, read_forward_solution,
convert_forward_solution, read_evokeds, read_cov,
SourceEstimate, write_evokeds, fit_dipole,
transform_surface_to, make_sphere_model, pick_types,
pick_info, EvokedArray)
from mne.simulation import generate_evoked
from mne.datasets import testing
from mne.utils import (run_tests_if_main, _TempDir, slow_test, requires_mne,
run_subprocess, requires_sklearn)
from mne.proj import make_eeg_average_ref_proj
from mne.io import Raw
from mne.surface import _compute_nearest
from mne.bem import _bem_find_surface, read_bem_solution
from mne.transforms import (read_trans, apply_trans, _get_mri_head_t)
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
def _compare_dipoles(orig, new):
"""Compare dipole results for equivalence"""
assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
assert_allclose(orig.pos, new.pos, err_msg='pos')
assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
assert_allclose(orig.gof, new.gof, err_msg='gof')
assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
assert_equal(orig.name, new.name)
def _check_dipole(dip, n_dipoles):
assert_equal(len(dip), n_dipoles)
assert_equal(dip.pos.shape, (n_dipoles, 3))
assert_equal(dip.ori.shape, (n_dipoles, 3))
assert_equal(dip.gof.shape, (n_dipoles,))
assert_equal(dip.amplitude.shape, (n_dipoles,))
@testing.requires_testing_data
def test_io_dipoles():
"""Test IO for .dip files
"""
tempdir = _TempDir()
dipole = read_dipole(fname_dip)
print(dipole) # test repr
out_fname = op.join(tempdir, 'temp.dip')
dipole.save(out_fname)
dipole_new = read_dipole(out_fname)
_compare_dipoles(dipole, dipole_new)
@slow_test
@testing.requires_testing_data
@requires_mne
def test_dipole_fitting():
"""Test dipole fitting"""
amp = 10e-9
tempdir = _TempDir()
rng = np.random.RandomState(0)
fname_dtemp = op.join(tempdir, 'test.dip')
fname_sim = op.join(tempdir, 'test-ave.fif')
fwd = convert_forward_solution(read_forward_solution(fname_fwd),
surf_ori=False, force_fixed=True)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
n_per_hemi = 5
vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
for s in fwd['src']]
nv = sum(len(v) for v in vertices)
stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
with warnings.catch_warnings(record=True): # semi-def cov
evoked = generate_evoked(fwd, stc, evoked, cov, snr=20,
random_state=rng)
# For speed, let's use a subset of channels (strange but works)
picks = np.sort(np.concatenate([
pick_types(evoked.info, meg=True, eeg=False)[::2],
pick_types(evoked.info, meg=False, eeg=True)[::2]]))
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
write_evokeds(fname_sim, evoked)
# Run MNE-C version
run_subprocess([
'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
'--noise', fname_cov, '--dip', fname_dtemp,
'--mri', fname_fwd, '--reg', '0', '--tmin', '0',
])
dip_c = read_dipole(fname_dtemp)
# Run mne-python version
sphere = make_sphere_model(head_radius=0.1)
dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)
# Sanity check: do our residuals have less power than orig data?
data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
assert_true((data_rms > resi_rms).all())
# Compare to original points
transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
# MNE-C skips the last "time" point :(
dip.crop(dip_c.times[0], dip_c.times[-1])
src_rr, src_nn = src_rr[:-1], src_nn[:-1]
# check that we did at least as well
corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
for d in (dip_c, dip):
new = d.pos
diffs = new - src_rr
corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
axis=1)))]
amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
gofs += [np.mean(d.gof)]
assert_true(dists[0] >= dists[1], 'dists: %s' % dists)
assert_true(corrs[0] <= corrs[1], 'corrs: %s' % corrs)
assert_true(gc_dists[0] >= gc_dists[1], 'gc-dists (ori): %s' % gc_dists)
assert_true(amp_errs[0] >= amp_errs[1], 'amplitude errors: %s' % amp_errs)
# assert_true(gofs[0] <= gofs[1], 'gof: %s' % gofs)
@testing.requires_testing_data
def test_len_index_dipoles():
"""Test len and indexing of Dipole objects
"""
dipole = read_dipole(fname_dip)
d0 = dipole[0]
d1 = dipole[:1]
_check_dipole(d0, 1)
_check_dipole(d1, 1)
_compare_dipoles(d0, d1)
mask = dipole.gof > 15
idx = np.where(mask)[0]
d_mask = dipole[mask]
_check_dipole(d_mask, 4)
_compare_dipoles(d_mask, dipole[idx])
@requires_sklearn
@testing.requires_testing_data
def test_min_distance_fit_dipole():
"""Test dipole min_dist to inner_skull"""
data_path = testing.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'
subjects_dir = op.join(data_path, 'subjects')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
subject = 'sample'
raw = Raw(raw_fname, preload=True)
# select eeg data
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
info = pick_info(raw.info, picks)
# Let's use cov = Identity
cov = read_cov(fname_cov)
cov['data'] = np.eye(cov['data'].shape[0])
    # Simulated scalp map
simulated_scalp_map = np.zeros(picks.shape[0])
simulated_scalp_map[27:34] = 1
simulated_scalp_map = simulated_scalp_map[:, None]
evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
min_dist = 5. # distance in mm
dip, residual = fit_dipole(evoked, cov, fname_bem, fname_trans,
min_dist=min_dist)
dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
assert_true(min_dist < (dist[0] * 1000.) < (min_dist + 1.))
assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
-1.)
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
"""Compute dipole depth"""
trans = read_trans(fname_trans)
trans = _get_mri_head_t(trans)[0]
bem = read_bem_solution(fname_bem)
surf = _bem_find_surface(bem, 'inner_skull')
points = surf['rr']
points = apply_trans(trans['trans'], points)
depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
return np.ravel(depth)
run_tests_if_main(False)
| bsd-3-clause |
chappers/sklearn-recipes | streaming/dpp_classifier_bak.py | 1 | 9955 | import sklearn
from sklearn.datasets import make_regression, make_classification
from sklearn.linear_model import SGDRegressor, SGDClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.pairwise import euclidean_distances
import pandas as pd
import numpy as np
from scipy import stats
from scipy.stats import wilcoxon
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.decomposition import PCA, KernelPCA
from sklearn.kernel_approximation import Nystroem
from dpp import sample_dpp, decompose_kernel, sample_conditional_dpp
def entropy(X):
mm = MinMaxScaler()
X_mm = mm.fit_transform(X)
Dpq = euclidean_distances(X_mm)
D_bar = np.mean([x for x in np.triu(Dpq).flatten() if x != 0])
alpha = -np.log(0.5)/D_bar
sim_pq = np.exp(-alpha * Dpq)
log_sim_pq = np.log(sim_pq)
entropy = -2*np.sum(np.triu(sim_pq*log_sim_pq + ((1-sim_pq)*np.log((1-sim_pq))), 1))
return entropy
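# Illustrative usage sketch (added comment; the exact score depends on the
# sampled data and is not asserted here):
#
# >>> X = np.random.RandomState(0).rand(20, 3)
# >>> score = entropy(X)  # scalar similarity-based entropy of the matrix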
def wilcoxon_group(X, f):
"""
Wilcoxon is a very aggressive selector in an unsupervised sense.
Do we require a supervised group selection? (probably)
Probably one that is score based in order to select the "best" ones
similar to OGFS?
"""
# X is a matrix, f is a single vector
if len(X.shape) == 1:
return wilcoxon(X, f).pvalue
# now we shall perform and check each one...and return only the lowest pvalue
    # take the p-value of each column-wise test before the minimum
    return np.min([wilcoxon(x, f).pvalue for x in X.T])
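# Illustrative usage sketch (added comment; p-values depend on the data and
# are not asserted here):
#
# >>> rng = np.random.RandomState(0)
# >>> f = rng.randn(30)
# >>> p = wilcoxon_group(rng.randn(30), f)  # single feature -> one p-value
# >>> p_min = wilcoxon_group(rng.randn(30, 4), f)  # smallest p-value over columns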
"""
Implement DPP version that is similar to what is done above
sketch of solution
------------------
DPP requires a known number of parameters to check at each partial fit!
"""
class DPPClassifier(SGDClassifier):
def __init__(self, loss="log", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, max_iter=None, tol=None, shuffle=True,
verbose=0, epsilon=0.1, n_jobs=1,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, class_weight=None, warm_start=False,
average=False, n_iter=None,
intragroup_alpha=0.05, intergroup_thres=None):
super(DPPClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average, n_iter=n_iter)
self.coef_info = {'cols': [], 'coef':[], 'excluded_cols': []}
self.seen_cols = []
self.base_shape = None
self.dpp_k = {'pca': 0, 'kpca':0}
self.unseen_only = False
self.intragroup_alpha = intragroup_alpha
self.intergroup_thres = intergroup_thres if intergroup_thres is not None else epsilon
def _dpp_estimate_k(self, L):
"""
L is the input kernel
"""
pca = PCA(n_components=None)
pca.fit(L)
pca_k = np.min(np.argwhere(np.cumsum(pca.explained_variance_ratio_) >
(1-self.intragroup_alpha)))
# also use KernelPCA
kpca = KernelPCA(kernel='rbf')
kpca.fit(L)
kpca_k = np.argwhere(kpca.lambdas_ > 0.01).flatten().shape[0]
self.dpp_k['pca'] = pca_k
self.dpp_k['kpca'] = kpca_k
print("PCA K: {}".format(self.dpp_k))
print("L dim: {}".format(L.shape))
def add_column_exclusion(self, cols):
self.coef_info['excluded_cols'] = list(self.coef_info['excluded_cols']) + list(cols)
def _fit_columns(self, X_, return_x=True, transform_only=False):
"""
Method filter through "unselected" columns. The goal of this
method is to filter any uninformative columns.
This will be selected based on index only?
If return_x is false, it will only return the boolean mask.
"""
X = X_[X_.columns.difference(self.coef_info['excluded_cols'])]
# order the columns correctly...
col_order = self.coef_info['cols'] + list([x for x in X.columns if x not in self.coef_info['cols']])
X = X[col_order]
return X
def _reg_penalty(self, X):
col_coef = [(col, coef) for col, coef in zip(X.columns.tolist(), self.coef_.flatten()) if np.abs(coef) >= self.intergroup_thres]
self.coef_info['cols'] = [x for x, _ in col_coef]
self.coef_info['coef'] = [x for _, x in col_coef]
self.coef_info['excluded_cols'] = [x for x in self.seen_cols if x not in self.coef_info['cols']]
self.coef_ = np.array(self.coef_info['coef']).reshape(1, -1)
def _dpp_sel(self, X_, y=None):
"""
DPP only relies on X.
We will condition the sampling based on:
* `self.coef_info['cols']`
After sampling it will go ahead and then perform grouped wilcoxon selection.
"""
X = np.array(X_)
cols_to_index = [idx for idx, x in enumerate(X_.columns) if x in self.coef_info['cols']]
unseen_cols_to_index = [idx for idx, x in enumerate(X_.columns) if x not in self.coef_info['cols']]
if X.shape[0] < 1000:
feat_dist = rbf_kernel(X.T)
else:
feat_dist = Nystroem().fit_transform(X.T)
self._dpp_estimate_k(feat_dist)
k = self.dpp_k['pca'] - len(self.coef_info['cols'])
if k < 1:
# this means k is possibly negative, reevaluate k based only on new incoming feats!
self.unseen_only = True
unseen_kernel = feat_dist[unseen_cols_to_index, :][:, unseen_cols_to_index]
#k = max(self._dpp_estimate_k(unseen_kernel), int(unseen_kernel.shape[0] * 0.5)+1)
k = unseen_kernel.shape[0]
print("Unseen only")
print(k)
if len(self.coef_info['cols']) == 0:
feat_index = sample_dpp(decompose_kernel(feat_dist), k=k)
else:
feat_index = sample_conditional_dpp(feat_dist, cols_to_index, k=k)
# select features using entropy measure
# how can we order features from most to least relevant first?
# we chould do it using f test? Or otherwise - presume DPP selects best one first
"""
feat_entropy = []
excl_entropy = []
X_sel = X[:, feat_index]
for idx, feat in enumerate(X_sel.T):
if len(feat_entropy) == 0:
feat_entropy.append(idx)
continue
if entropy(X_sel[:, feat_entropy]) > entropy(X_sel[:, feat_entropy+[idx]]):
feat_entropy.append(idx)
else:
excl_entropy.append(idx)
"""
# iterate over feat_index to determine
# information on wilcoxon test
# as the feat index are already "ordered" as that is how DPP would
# perform the sampling - we will do the single pass in the same
# way it was approached in the OGFS
# feat index will have all previous sampled columns as well...
if not self.unseen_only:
feat_check = []
excl_check = []
X_sel = X[:, feat_index]
for idx, feat in enumerate(X_sel.T):
if len(feat_check) == 0:
feat_check.append(idx)
continue
if wilcoxon_group(X_sel[:, feat_check], feat) >= self.intragroup_alpha:
feat_check.append(idx)
else:
excl_check.append(idx)
index_to_col = [col for idx, col in enumerate(X_.columns) if idx in feat_check]
else:
# if we are considering unseen only, we will simply let the regulariser
# act on it, sim. to grafting.
index_to_col = [col for idx, col in enumerate(X_.columns) if idx in feat_index]
self.unseen_only = False # perhaps add more conditions around unseen - i.e. once unseen condition kicks in, it remains active?
self.coef_info['cols'] = list(set(self.coef_info['cols'] + index_to_col))
col_rem = X_.columns.difference(self.coef_info['cols'])
self.add_column_exclusion(col_rem)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
self.seen_cols = list(set(self.seen_cols + X.columns.tolist()))
# TODO: add DPP selection
self.coef_info = {'cols': [], 'coef':[], 'excluded_cols': []}
#self._dpp_sel(X, y)
#X = self._fit_columns(X)
super(DPPClassifier, self).fit(X, y, coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
self._reg_penalty(X)
return self
def partial_fit(self, X, y, sample_weight=None):
X_ = X.copy()
self.seen_cols = list(set(self.seen_cols + X.columns.tolist()))
X = X[X.columns.difference(self.coef_info['excluded_cols'])]
# TODO: add DPP selection
self._dpp_sel(X, y)
X = self._fit_columns(X_)
# now update coefficients
n_samples, n_features = X.shape
coef_list = np.zeros(n_features, dtype=np.float64, order="C")
coef_list[:len(self.coef_info['coef'])] = self.coef_info['coef']
self.coef_ = np.array(coef_list).reshape(1, -1)
super(DPPClassifier, self).partial_fit(X, y, sample_weight=None)
self._reg_penalty(X)
return self
def predict(self, X):
X = self._fit_columns(X, transform_only=True)
return super(DPPClassifier, self).predict(X)
def predict_proba(self, X):
X = self._fit_columns(X, transform_only=True)
return super(DPPClassifier, self).predict_proba(X) | mit |
Zsailer/epistasis | epistasis/models/nonlinear/power.py | 2 | 5884 | import inspect
from functools import wraps
import scipy
import numpy as np
import pandas as pd
import lmfit
from lmfit import Parameter, Parameters
from epistasis.stats import gmean, pearson
from epistasis.models.utils import arghandler
from epistasis.models.linear.ordinary import EpistasisLinearRegression
from epistasis.models.nonlinear.ordinary import EpistasisNonlinearRegression
from .minimizer import FunctionMinimizer
# -------------------- Power Transform Function -----------------------
def power_transform(x, lmbda, A, B, data=None):
"""Transform x according to a power transformation.
    Note: this function calculates the geometric mean of x
to center the power transform on the data. If you'd like to calculate
the geometric mean on a different array than x (perhaps some
    other data), pass that other array to the data keyword argument.
.. math::
y = \\frac{ x^{\\lambda} - 1 }{\\lambda [GM(x)]^{\\lambda - 1}}
Parameters
----------
x : array-like
data to transform.
lmbda : float
power parameter.
A : float
horizontal translation constant.
B : float
vertical translation constant.
data : array-like (default=None)
data to calculate the geometric mean.
"""
# Calculate the GMean on the data
if data is None:
gm = gmean(x + A)
else:
gm = gmean(data + A)
# Check for zeros
if lmbda == 0:
return gm * np.log(x + A)
else:
first = (x + A)**lmbda
out = (first - 1.0) / (lmbda * gm**(lmbda - 1)) + B
return out
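# Illustrative sketch (added for clarity, not part of the original module).
# With lmbda=1, A=0 and B=0 the geometric-mean factor equals 1, so the
# transform reduces to a simple shift:
#
# >>> power_transform(np.array([1.0, 2.0, 3.0]), lmbda=1.0, A=0.0, B=0.0)  # -> [0., 1., 2.]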
# --------------------- Power transform Minizer object -----------------------
class PowerTransformMinizer(FunctionMinimizer):
"""Minimizer class for power transform.
"""
def __init__(self, **p0):
# Construct parameters object
self.parameters = Parameters()
for p in ['lmbda', 'A', 'B']:
# Get starting value of parameter if given.
val = None
if p in p0:
val = p0[p]
# Add parameter.
self.parameters.add(name=p, value=val)
# Set function
self._function = power_transform
def function(self, x, lmbda, A, B):
"""Execute the function."""
return self._function(x, lmbda=lmbda, A=A, B=B, data=self.data)
def predict(self, x):
return self._function(x, **self.parameters, data=self.data)
def fit(self, x, y):
self.data = x
# Set the lower bound on B.
self.parameters['A'].set(min=-min(x))
# Store residual steps in case fit fails.
last_residual_set = None
# Residual function to minimize.
def residual(params, func, x, y=None, data=None):
# Fit model
parvals = list(params.values())
ymodel = func(x, *parvals, data=data)
# Store items in case of error.
nonlocal last_residual_set
last_residual_set = (params, ymodel)
return y - ymodel
# Minimize the above residual function.
try:
self.minimizer = lmfit.minimize(residual, self.parameters,
args=[self._function, x],
kws={'y': y, 'data': self.data})
# If fitting fails, print what happened
except Exception as e:
# if e is ValueError
print("ERROR! Some of the transformed phenotypes are invalid.")
print("\nParameters:")
print("----------")
print(last_residual_set[0].pretty_print())
print("\nTransformed phenotypes:")
print("----------------------")
print(last_residual_set[1])
raise
self.parameters = self.minimizer.params
# -------------------- Epistasis Model -----------------------
class EpistasisPowerTransform(EpistasisNonlinearRegression):
"""Use power-transform function, via nonlinear least-squares regression,
to estimate epistatic coefficients and the nonlinear scale in a nonlinear
genotype-phenotype map.
    This model has two steps:
1. Fit an additive, linear regression to approximate the average effect
of individual mutations.
2. Fit the nonlinear function to the observed phenotypes vs. the
additive phenotypes estimated in step 1.
Methods are described in the following publication:
Sailer, Z. R. & Harms, M. J. 'Detecting High-Order Epistasis in
Nonlinear Genotype-Phenotype Maps'. Genetics 205, 1079-1088 (2017).
Parameters
----------
model_type : str (default: global)
type of epistasis model to use. See paper above for more information.
Keyword Arguments
-----------------
Keyword arguments are interpreted as intial guesses for the nonlinear
function parameters. Must have the same name as parameters in the
nonlinear function
Attributes
----------
Additive : EpistasisLinearRegression
Linear regression object for fitting additive model
parameters : Parameters object
Mapping object for nonlinear coefficients
"""
def __init__(self, model_type="global", **p0):
# Set up the function for fitting.
self.function = power_transform
self.minimizer = PowerTransformMinizer(**p0)
self.parameters = self.minimizer.parameters
self.order = 1
self.Xbuilt = {}
# Construct parameters object
self.set_params(model_type=model_type)
# Store model specs.
self.model_specs = dict(
function=self.function,
model_type=self.model_type,
**p0)
# Set up additive and high-order linear model
self.Additive = EpistasisLinearRegression(
order=1, model_type=self.model_type)
| unlicense |
skidzo/sympy | examples/intermediate/sample.py | 107 | 3494 | """
Utility functions for plotting sympy functions.
See examples\mplot2d.py and examples\mplot3d.py for usable 2d and 3d
graphing functions using matplotlib.
"""
from sympy.core.sympify import sympify, SympifyError
from sympy.external import import_module
np = import_module('numpy')
def sample2d(f, x_args):
"""
Samples a 2d function f over specified intervals and returns two
arrays (X, Y) suitable for plotting with matlab (matplotlib)
syntax. See examples\mplot2d.py.
f is a function of one variable, such as x**2.
x_args is an interval given in the form (var, min, max, n)
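    Example (illustrative):
        >>> from sympy.abc import x
        >>> X, Y = sample2d(x**2, (x, -2, 2, 20))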
"""
try:
f = sympify(f)
except SympifyError:
        raise ValueError("f could not be interpreted as a SymPy function")
try:
x, x_min, x_max, x_n = x_args
except AttributeError:
raise ValueError("x_args must be a tuple of the form (var, min, max, n)")
x_l = float(x_max - x_min)
x_d = x_l/float(x_n)
X = np.arange(float(x_min), float(x_max) + x_d, x_d)
Y = np.empty(len(X))
for i in range(len(X)):
try:
Y[i] = float(f.subs(x, X[i]))
except TypeError:
Y[i] = None
return X, Y
def sample3d(f, x_args, y_args):
"""
Samples a 3d function f over specified intervals and returns three
2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib)
syntax. See examples\mplot3d.py.
f is a function of two variables, such as x**2 + y**2.
x_args and y_args are intervals given in the form (var, min, max, n)
"""
x, x_min, x_max, x_n = None, None, None, None
y, y_min, y_max, y_n = None, None, None, None
try:
f = sympify(f)
except SympifyError:
raise ValueError("f could not be interpreted as a SymPy function")
try:
x, x_min, x_max, x_n = x_args
y, y_min, y_max, y_n = y_args
except AttributeError:
raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)")
x_l = float(x_max - x_min)
x_d = x_l/float(x_n)
x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)
y_l = float(y_max - y_min)
y_d = y_l/float(y_n)
y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)
def meshgrid(x, y):
"""
Taken from matplotlib.mlab.meshgrid.
"""
x = np.array(x)
y = np.array(y)
numRows, numCols = len(y), len(x)
x.shape = 1, numCols
X = np.repeat(x, numRows, 0)
y.shape = numRows, 1
Y = np.repeat(y, numCols, 1)
return X, Y
X, Y = np.meshgrid(x_a, y_a)
Z = np.ndarray((len(X), len(X[0])))
for j in range(len(X)):
for k in range(len(X[0])):
try:
Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))
except (TypeError, NotImplementedError):
Z[j][k] = 0
return X, Y, Z
def sample(f, *var_args):
"""
Samples a 2d or 3d function over specified intervals and returns
a dataset suitable for plotting with matlab (matplotlib) syntax.
Wrapper for sample2d and sample3d.
f is a function of one or two variables, such as x**2.
var_args are intervals for each variable given in the form (var, min, max, n)
"""
if len(var_args) == 1:
return sample2d(f, var_args[0])
elif len(var_args) == 2:
return sample3d(f, var_args[0], var_args[1])
else:
raise ValueError("Only 2d and 3d sampling are supported at this time.")
| bsd-3-clause |
theislab/scanpy | scanpy/plotting/_baseplot_class.py | 1 | 38723 | """BasePlot for dotplot, matrixplot and stacked_violin
"""
import collections.abc as cabc
from collections import namedtuple
from typing import Optional, Union, Mapping # Special
from typing import Sequence, Iterable # ABCs
from typing import Tuple # Classes
import numpy as np
from anndata import AnnData
from matplotlib.axes import Axes
from matplotlib import pyplot as pl
from matplotlib import gridspec
from matplotlib.colors import Normalize
from warnings import warn
from .. import logging as logg
from .._compat import Literal
from ._utils import make_grid_spec, check_colornorm
from ._utils import ColorLike, _AxesSubplot
from ._anndata import _plot_dendrogram, _get_dendrogram_key, _prepare_dataframe
_VarNames = Union[str, Sequence[str]]
doc_common_groupby_plot_args = """\
title
Title for the figure
colorbar_title
Title for the color bar. New line character (\\n) can be used.
cmap
String denoting matplotlib color map.
standard_scale
Whether or not to standardize the given dimension between 0 and 1, meaning for
each variable or group, subtract the minimum and divide each by its maximum.
swap_axes
By default, the x axis contains `var_names` (e.g. genes) and the y axis
the `groupby` categories. By setting `swap_axes` then x are the
`groupby` categories and y the `var_names`.
return_fig
Returns :class:`DotPlot` object. Useful for fine-tuning
the plot. Takes precedence over `show=False`.
"""
class BasePlot(object):
"""\
Generic class for the visualization of AnnData categories and
selected `var` (features or genes).
Takes care of the visual location of a main plot, additional plots
    in the margins (e.g. dendrogram, margin totals) and legends. It also
    understands how to adapt the visual parameters if the plot is rotated.
    Classes based on BasePlot implement their own _mainplot() method.
The BasePlot works by method chaining. For example:
BasePlot(adata, ...).legend(title='legend').style(cmap='binary').show()
"""
DEFAULT_SAVE_PREFIX = 'baseplot_'
MIN_FIGURE_HEIGHT = 2.5
DEFAULT_CATEGORY_HEIGHT = 0.35
DEFAULT_CATEGORY_WIDTH = 0.37
# gridspec parameter. Sets the space between mainplot, dendrogram and legend
DEFAULT_WSPACE = 0
DEFAULT_COLORMAP = 'winter'
DEFAULT_LEGENDS_WIDTH = 1.5
DEFAULT_COLOR_LEGEND_TITLE = 'Expression\nlevel in group'
MAX_NUM_CATEGORIES = 500 # maximum number of categories allowed to be plotted
def __init__(
self,
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Union[str, Sequence[str]],
use_raw: Optional[bool] = None,
log: bool = False,
num_categories: int = 7,
categories_order: Optional[Sequence[str]] = None,
title: Optional['str'] = None,
figsize: Optional[Tuple[float, float]] = None,
gene_symbols: Optional[str] = None,
var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
var_group_labels: Optional[Sequence[str]] = None,
var_group_rotation: Optional[float] = None,
layer: Optional[str] = None,
ax: Optional[_AxesSubplot] = None,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
vcenter: Optional[float] = None,
norm: Optional[Normalize] = None,
**kwds,
):
self.var_names = var_names
self.var_group_labels = var_group_labels
self.var_group_positions = var_group_positions
self.var_group_rotation = var_group_rotation
self.width, self.height = figsize if figsize is not None else (None, None)
self.has_var_groups = (
True
if var_group_positions is not None and len(var_group_positions) > 0
else False
)
self._update_var_groups()
self.categories, self.obs_tidy = _prepare_dataframe(
adata,
self.var_names,
groupby,
use_raw,
log,
num_categories,
layer=layer,
gene_symbols=gene_symbols,
)
if len(self.categories) > self.MAX_NUM_CATEGORIES:
warn(
f"Over {self.MAX_NUM_CATEGORIES} categories found. "
"Plot would be very large."
)
if categories_order is not None:
if set(self.obs_tidy.index.categories) != set(categories_order):
                logg.error(
                    "Please check that the categories given by "
                    "the `categories_order` parameter match the categories that "
"want to be reordered.\n\n"
"Mismatch: "
f"{set(self.obs_tidy.index.categories).difference(categories_order)}\n\n"
f"Given order categories: {categories_order}\n\n"
f"{groupby} categories: {list(self.obs_tidy.index.categories)}\n"
)
return
self.adata = adata
self.groupby = [groupby] if isinstance(groupby, str) else groupby
self.log = log
self.kwds = kwds
VBoundNorm = namedtuple('VBoundNorm', ['vmin', 'vmax', 'vcenter', 'norm'])
self.vboundnorm = VBoundNorm(vmin=vmin, vmax=vmax, vcenter=vcenter, norm=norm)
# set default values for legend
self.color_legend_title = self.DEFAULT_COLOR_LEGEND_TITLE
self.legends_width = self.DEFAULT_LEGENDS_WIDTH
# set style defaults
self.cmap = self.DEFAULT_COLORMAP
# style default parameters
self.are_axes_swapped = False
self.categories_order = categories_order
self.var_names_idx_order = None
self.wspace = self.DEFAULT_WSPACE
# minimum height required for legends to plot properly
self.min_figure_height = self.MIN_FIGURE_HEIGHT
self.fig_title = title
self.group_extra_size = 0
self.plot_group_extra = None
# after .render() is called the fig value is assigned and ax_dict
# contains a dictionary of the axes used in the plot
self.fig = None
self.ax_dict = None
self.ax = ax
def swap_axes(self, swap_axes: Optional[bool] = True):
"""
Plots a transposed image.
By default, the x axis contains `var_names` (e.g. genes) and the y
axis the `groupby` categories. By setting `swap_axes` then x are
the `groupby` categories and y the `var_names`.
Parameters
----------
swap_axes
Boolean to turn on (True) or off (False) 'swap_axes'. Default True
Returns
-------
BasePlot
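        Examples
        --------
        Following the pattern of the other examples in this class
        (the ``sc.pl.BasePlot`` accessor is illustrative):
        >>> adata = sc.datasets.pbmc68k_reduced()
        >>> markers = {'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}
        >>> sc.pl.BasePlot(adata, markers, groupby='bulk_labels').swap_axes().show()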
"""
self.DEFAULT_CATEGORY_HEIGHT, self.DEFAULT_CATEGORY_WIDTH = (
self.DEFAULT_CATEGORY_WIDTH,
self.DEFAULT_CATEGORY_HEIGHT,
)
self.are_axes_swapped = swap_axes
return self
def add_dendrogram(
self,
show: Optional[bool] = True,
dendrogram_key: Optional[str] = None,
size: Optional[float] = 0.8,
):
"""\
Show dendrogram based on the hierarchical clustering between the `groupby`
categories. Categories are reordered to match the dendrogram order.
The dendrogram information is computed using :func:`scanpy.tl.dendrogram`.
If `sc.tl.dendrogram` has not been called previously the function is called
with default parameters.
The dendrogram is by default shown on the right side of the plot or on top
if the axes are swapped.
`var_names` are reordered to produce a more pleasing output if:
* The data contains `var_groups`
* the `var_groups` match the categories.
The previous conditions happen by default when using Plot
to show the results from :func:`~scanpy.tl.rank_genes_groups` (aka gene markers), by
calling `scanpy.tl.rank_genes_groups_(plot_name)`.
Parameters
----------
show
Boolean to turn on (True) or off (False) 'add_dendrogram'
dendrogram_key
Needed if `sc.tl.dendrogram` saved the dendrogram using a key different
than the default name.
size
size of the dendrogram. Corresponds to width when dendrogram shown on
the right of the plot, or height when shown on top. The unit is the same
as in matplotlib (inches).
Returns
-------
BasePlot
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}
>>> sc.pl.BasePlot(adata, markers, groupby='bulk_labels').add_dendrogram().show()
"""
if not show:
self.plot_group_extra = None
return self
if self.groupby is None or len(self.categories) <= 2:
# dendrogram can only be computed between groupby categories
logg.warning(
"Dendrogram not added. Dendrogram is added only "
"when the number of categories to plot > 2"
)
return self
self.group_extra_size = size
# to correctly plot the dendrogram the categories need to be ordered
# according to the dendrogram ordering.
self._reorder_categories_after_dendrogram(dendrogram_key)
dendro_ticks = np.arange(len(self.categories)) + 0.5
self.group_extra_size = size
self.plot_group_extra = {
'kind': 'dendrogram',
'width': size,
'dendrogram_key': dendrogram_key,
'dendrogram_ticks': dendro_ticks,
}
return self
def add_totals(
self,
show: Optional[bool] = True,
sort: Literal['ascending', 'descending'] = None,
size: Optional[float] = 0.8,
color: Optional[Union[ColorLike, Sequence[ColorLike]]] = None,
):
"""\
        Show a barplot with the number of cells in each `groupby` category.
The barplot is by default shown on the right side of the plot or on top
if the axes are swapped.
Parameters
----------
show
Boolean to turn on (True) or off (False) 'add_totals'
sort
Set to either 'ascending' or 'descending' to reorder the categories
by cell number
size
size of the barplot. Corresponds to width when shown on
the right of the plot, or height when shown on top. The unit is the same
as in matplotlib (inches).
color
Color for the bar plots or list of colors for each of the bar plots.
By default, each bar plot uses the colors assigned in
`adata.uns[{groupby}_colors]`.
Returns
-------
BasePlot
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}
>>> sc.pl.BasePlot(adata, markers, groupby='bulk_labels').add_totals().show()
"""
self.group_extra_size = size
if not show:
# hide totals
self.plot_group_extra = None
self.group_extra_size = 0
return self
        _sort = sort is not None
        _ascending = sort == 'ascending'
counts_df = self.obs_tidy.index.value_counts(sort=_sort, ascending=_ascending)
if _sort:
self.categories_order = counts_df.index
self.plot_group_extra = {
'kind': 'group_totals',
'width': size,
'sort': sort,
'counts_df': counts_df,
'color': color,
}
return self
def style(self, cmap: Optional[str] = DEFAULT_COLORMAP):
"""\
Set visual style parameters
Parameters
----------
cmap
colormap
Returns
-------
BasePlot
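        Examples
        --------
        As in the other examples of this class (``sc.pl.BasePlot`` is
        illustrative):
        >>> adata = sc.datasets.pbmc68k_reduced()
        >>> markers = {'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}
        >>> sc.pl.BasePlot(adata, markers, groupby='bulk_labels').style(cmap='Blues').show()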
"""
        self.cmap = cmap
        return self
def legend(
self,
show: Optional[bool] = True,
title: Optional[str] = DEFAULT_COLOR_LEGEND_TITLE,
width: Optional[float] = DEFAULT_LEGENDS_WIDTH,
):
"""\
Configure legend parameters
Parameters
----------
show
Set to 'False' to hide the default plot of the legend. This sets the
legend width to zero which will result in a wider main plot.
title
Legend title. Appears on top of the color bar. Use '\\n' to add line breaks.
width
Width of the legend. The unit is the same as in matplotlib (inches)
Returns
-------
BasePlot
Examples
--------
Set legend title:
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}
>>> dp = sc.pl.BasePlot(adata, markers, groupby='bulk_labels')
        >>> dp.legend(title='log(UMI counts + 1)').show()
"""
if not show:
            # turn off legends by setting width to 0
self.legends_width = 0
else:
self.color_legend_title = title
self.legends_width = width
return self
def get_axes(self):
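        """Return the dictionary of axes produced by `make_figure`,
        building the figure first if it has not been created yet."""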
if self.ax_dict is None:
self.make_figure()
return self.ax_dict
def _plot_totals(
self, total_barplot_ax: Axes, orientation: Literal['top', 'right']
):
"""
Makes the bar plot for totals
"""
params = self.plot_group_extra
counts_df = params['counts_df']
if self.categories_order is not None:
counts_df = counts_df.loc[self.categories_order]
if params['color'] is None:
if f'{self.groupby}_colors' in self.adata.uns:
color = self.adata.uns[f'{self.groupby}_colors']
else:
color = 'salmon'
else:
color = params['color']
if orientation == 'top':
counts_df.plot(
kind="bar",
color=color,
position=0.5,
ax=total_barplot_ax,
edgecolor="black",
width=0.65,
)
# add numbers to the top of the bars
max_y = max([p.get_height() for p in total_barplot_ax.patches])
for p in total_barplot_ax.patches:
p.set_x(p.get_x() + 0.5)
if p.get_height() >= 1000:
display_number = f'{np.round(p.get_height()/1000, decimals=1)}k'
else:
display_number = np.round(p.get_height(), decimals=1)
total_barplot_ax.annotate(
display_number,
(p.get_x() + p.get_width() / 2.0, (p.get_height() + max_y * 0.05)),
ha="center",
va="top",
xytext=(0, 10),
fontsize="x-small",
textcoords="offset points",
)
# for k in total_barplot_ax.spines.keys():
# total_barplot_ax.spines[k].set_visible(False)
total_barplot_ax.set_ylim(0, max_y * 1.4)
elif orientation == 'right':
counts_df.plot(
kind="barh",
color=color,
position=-0.3,
ax=total_barplot_ax,
edgecolor="black",
width=0.65,
)
# add numbers to the right of the bars
max_x = max([p.get_width() for p in total_barplot_ax.patches])
for p in total_barplot_ax.patches:
if p.get_width() >= 1000:
display_number = f'{np.round(p.get_width()/1000, decimals=1)}k'
else:
display_number = np.round(p.get_width(), decimals=1)
total_barplot_ax.annotate(
display_number,
((p.get_width()), p.get_y() + p.get_height()),
ha="center",
va="top",
xytext=(10, 10),
fontsize="x-small",
textcoords="offset points",
)
total_barplot_ax.set_xlim(0, max_x * 1.4)
total_barplot_ax.grid(False)
total_barplot_ax.axis("off")
def _plot_colorbar(self, color_legend_ax: Axes, normalize):
"""
        Plots a horizontal colorbar given the ax and normalize values
Parameters
----------
color_legend_ax
normalize
Returns
-------
None, updates color_legend_ax
"""
cmap = pl.get_cmap(self.cmap)
import matplotlib.colorbar
matplotlib.colorbar.ColorbarBase(
color_legend_ax, orientation='horizontal', cmap=cmap, norm=normalize
)
color_legend_ax.set_title(self.color_legend_title, fontsize='small')
color_legend_ax.xaxis.set_tick_params(labelsize='small')
def _plot_legend(self, legend_ax, return_ax_dict, normalize):
# to maintain the fixed height size of the legends, a
# spacer of variable height is added at top and bottom.
# The structure for the legends is:
# first row: variable space to keep the first rows of the same size
        # second row: colorbar legend
legend_height = self.min_figure_height * 0.08
height_ratios = [
self.height - legend_height,
legend_height,
]
fig, legend_gs = make_grid_spec(
legend_ax, nrows=2, ncols=1, height_ratios=height_ratios
)
color_legend_ax = fig.add_subplot(legend_gs[1])
self._plot_colorbar(color_legend_ax, normalize)
return_ax_dict['color_legend_ax'] = color_legend_ax
def _mainplot(self, ax):
y_labels = self.categories
x_labels = self.var_names
if self.var_names_idx_order is not None:
x_labels = [x_labels[x] for x in self.var_names_idx_order]
if self.categories_order is not None:
y_labels = self.categories_order
if self.are_axes_swapped:
x_labels, y_labels = y_labels, x_labels
ax.set_xlabel(self.groupby)
else:
ax.set_ylabel(self.groupby)
y_ticks = np.arange(len(y_labels)) + 0.5
ax.set_yticks(y_ticks)
ax.set_yticklabels(y_labels)
x_ticks = np.arange(len(x_labels)) + 0.5
ax.set_xticks(x_ticks)
ax.set_xticklabels(x_labels, rotation=90, ha='center', minor=False)
ax.tick_params(axis='both', labelsize='small')
ax.grid(False)
        # to be consistent with the heatmap plot, it is better to
# invert the order of the y-axis, such that the first group is on
# top
ax.set_ylim(len(y_labels), 0)
ax.set_xlim(0, len(x_labels))
return check_colornorm(
self.vboundnorm.vmin,
self.vboundnorm.vmax,
self.vboundnorm.vcenter,
self.vboundnorm.norm,
)
def make_figure(self):
"""
Renders the image but does not call :func:`matplotlib.pyplot.show`. Useful
when several plots are put together into one figure.
See also
--------
`show()`: Renders and shows the plot.
`savefig()`: Saves the plot.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> fig, (ax0, ax1) = plt.subplots(1, 2)
>>> sc.pl.MatrixPlot(adata, markers, groupby='bulk_labels', ax=ax0)\
... .style(cmap='Blues', edge_color='none').make_figure()
>>> sc.pl.DotPlot(adata, markers, groupby='bulk_labels', ax=ax1).make_figure()
"""
category_height = self.DEFAULT_CATEGORY_HEIGHT
category_width = self.DEFAULT_CATEGORY_WIDTH
if self.height is None:
mainplot_height = len(self.categories) * category_height
mainplot_width = (
len(self.var_names) * category_width + self.group_extra_size
)
if self.are_axes_swapped:
mainplot_height, mainplot_width = mainplot_width, mainplot_height
height = mainplot_height + 1 # +1 for labels
# if the number of categories is small use
# a larger height, otherwise the legends do not fit
self.height = max([self.min_figure_height, height])
self.width = mainplot_width + self.legends_width
else:
self.min_figure_height = self.height
mainplot_height = self.height
mainplot_width = self.width - (self.legends_width + self.group_extra_size)
return_ax_dict = {}
# define a layout of 1 rows x 2 columns
# first ax is for the main figure.
# second ax is to plot legends
legends_width_spacer = 0.7 / self.width
self.fig, gs = make_grid_spec(
self.ax or (self.width, self.height),
nrows=1,
ncols=2,
wspace=legends_width_spacer,
width_ratios=[mainplot_width + self.group_extra_size, self.legends_width],
)
if self.has_var_groups:
# add some space in case 'brackets' want to be plotted on top of the image
if self.are_axes_swapped:
var_groups_height = category_height
else:
var_groups_height = category_height / 2
else:
var_groups_height = 0
mainplot_width = mainplot_width - self.group_extra_size
spacer_height = self.height - var_groups_height - mainplot_height
if not self.are_axes_swapped:
height_ratios = [spacer_height, var_groups_height, mainplot_height]
width_ratios = [mainplot_width, self.group_extra_size]
else:
height_ratios = [spacer_height, self.group_extra_size, mainplot_height]
width_ratios = [mainplot_width, var_groups_height]
# gridspec is the same but rows and columns are swapped
if self.fig_title is not None and self.fig_title.strip() != '':
# for the figure title use the ax that contains
# all the main graphical elements (main plot, dendrogram etc)
# otherwise the title may overlay with the figure.
# also, this puts the title centered on the main figure and not
# centered between the main figure and the legends
_ax = self.fig.add_subplot(gs[0, 0])
_ax.axis('off')
_ax.set_title(self.fig_title)
# the main plot is divided into three rows and two columns
        # first row is a spacer that is adjusted in case the
# legends need more height than the main plot
# second row is for brackets (if needed),
# third row is for mainplot and dendrogram/totals (legend goes in gs[0,1]
# defined earlier)
mainplot_gs = gridspec.GridSpecFromSubplotSpec(
nrows=3,
ncols=2,
wspace=self.wspace,
hspace=0.0,
subplot_spec=gs[0, 0],
width_ratios=width_ratios,
height_ratios=height_ratios,
)
main_ax = self.fig.add_subplot(mainplot_gs[2, 0])
return_ax_dict['mainplot_ax'] = main_ax
if not self.are_axes_swapped:
if self.plot_group_extra is not None:
group_extra_ax = self.fig.add_subplot(mainplot_gs[2, 1], sharey=main_ax)
group_extra_orientation = 'right'
if self.has_var_groups:
gene_groups_ax = self.fig.add_subplot(mainplot_gs[1, 0], sharex=main_ax)
var_group_orientation = 'top'
else:
if self.plot_group_extra:
group_extra_ax = self.fig.add_subplot(mainplot_gs[1, 0], sharex=main_ax)
group_extra_orientation = 'top'
if self.has_var_groups:
gene_groups_ax = self.fig.add_subplot(mainplot_gs[2, 1], sharey=main_ax)
var_group_orientation = 'right'
if self.plot_group_extra is not None:
if self.plot_group_extra['kind'] == 'dendrogram':
_plot_dendrogram(
group_extra_ax,
self.adata,
self.groupby,
dendrogram_key=self.plot_group_extra['dendrogram_key'],
ticks=self.plot_group_extra['dendrogram_ticks'],
orientation=group_extra_orientation,
)
if self.plot_group_extra['kind'] == 'group_totals':
self._plot_totals(group_extra_ax, group_extra_orientation)
return_ax_dict['group_extra_ax'] = group_extra_ax
# plot group legends on top or left of main_ax (if given)
if self.has_var_groups:
self._plot_var_groups_brackets(
gene_groups_ax,
group_positions=self.var_group_positions,
group_labels=self.var_group_labels,
rotation=self.var_group_rotation,
left_adjustment=0.2,
right_adjustment=0.7,
orientation=var_group_orientation,
)
return_ax_dict['gene_group_ax'] = gene_groups_ax
# plot the mainplot
normalize = self._mainplot(main_ax)
# code from pandas.plot in add_totals adds
# minor ticks that need to be removed
main_ax.yaxis.set_tick_params(which='minor', left=False, right=False)
main_ax.xaxis.set_tick_params(which='minor', top=False, bottom=False, length=0)
main_ax.set_zorder(100)
if self.legends_width > 0:
legend_ax = self.fig.add_subplot(gs[0, 1])
self._plot_legend(legend_ax, return_ax_dict, normalize)
self.ax_dict = return_ax_dict
def show(self, return_axes: Optional[bool] = None):
"""
Show the figure
Parameters
----------
return_axes
If true return a dictionary with the figure axes. When return_axes is true
then :func:`matplotlib.pyplot.show` is not called.
Returns
-------
If `return_axes=True`: Dict of :class:`matplotlib.axes.Axes`. The dict key
indicates the type of ax (eg. `mainplot_ax`)
See also
--------
        `make_figure()`: Renders the plot but does not call :func:`matplotlib.pyplot.show`
`savefig()`: Saves the plot.
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.Plot(adata, markers, groupby='bulk_labels').show()
"""
self.make_figure()
if return_axes:
return self.ax_dict
else:
pl.show()
def savefig(self, filename: str, bbox_inches: Optional[str] = 'tight', **kwargs):
"""
Save the current figure
Parameters
----------
filename
Figure filename. Figure *format* is taken from the file ending unless
the parameter `format` is given.
bbox_inches
By default is set to 'tight' to avoid cropping of the legends.
kwargs
Passed to :func:`matplotlib.pyplot.savefig`
See also
--------
        `make_figure()`: Renders the plot but does not call :func:`matplotlib.pyplot.show`
`show()`: Renders and shows the plot
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.BasePlot(adata, markers, groupby='bulk_labels').savefig('plot.pdf')
"""
self.make_figure()
pl.savefig(filename, bbox_inches=bbox_inches, **kwargs)
def _reorder_categories_after_dendrogram(self, dendrogram):
"""\
        Function used by plotting functions that need to reorder the groupby
observations based on the dendrogram results.
The function checks if a dendrogram has already been precomputed.
If not, `sc.tl.dendrogram` is run with default parameters.
The results found in `.uns[dendrogram_key]` are used to reorder
`var_group_labels` and `var_group_positions`.
Returns
-------
None internally updates
'categories_idx_ordered', 'var_group_names_idx_ordered',
'var_group_labels' and 'var_group_positions'
"""
def _format_first_three_categories(_categories):
"""used to clean up warning message"""
_categories = list(_categories)
if len(_categories) > 3:
_categories = _categories[:3] + ['etc.']
return ', '.join(_categories)
key = _get_dendrogram_key(self.adata, dendrogram, self.groupby)
dendro_info = self.adata.uns[key]
if self.groupby != dendro_info['groupby']:
            raise ValueError(
                "Incompatible observations. The precomputed dendrogram contains "
                f"information for the observation: '{dendro_info['groupby']}' while "
                f"the plot is made for the observation: '{self.groupby}'. "
                "Please run `sc.tl.dendrogram` using the right observation."
)
# order of groupby categories
categories_idx_ordered = dendro_info['categories_idx_ordered']
categories_ordered = dendro_info['categories_ordered']
if len(self.categories) != len(categories_idx_ordered):
            raise ValueError(
                "Incompatible observations. Dendrogram data has "
                f"{len(categories_idx_ordered)} categories but current groupby "
                f"observation {self.groupby!r} contains {len(self.categories)} categories. "
                "Most likely the underlying groupby observation changed after the "
                "initial computation of `sc.tl.dendrogram`. "
                "Please run `sc.tl.dendrogram` again."
)
# reorder var_groups (if any)
if self.var_names is not None:
var_names_idx_ordered = list(range(len(self.var_names)))
if self.has_var_groups:
if set(self.var_group_labels) == set(self.categories):
positions_ordered = []
labels_ordered = []
position_start = 0
var_names_idx_ordered = []
for cat_name in categories_ordered:
idx = self.var_group_labels.index(cat_name)
position = self.var_group_positions[idx]
_var_names = self.var_names[position[0] : position[1] + 1]
var_names_idx_ordered.extend(range(position[0], position[1] + 1))
positions_ordered.append(
(position_start, position_start + len(_var_names) - 1)
)
position_start += len(_var_names)
labels_ordered.append(self.var_group_labels[idx])
self.var_group_labels = labels_ordered
self.var_group_positions = positions_ordered
else:
logg.warning(
"Groups are not reordered because the `groupby` categories "
"and the `var_group_labels` are different.\n"
f"categories: {_format_first_three_categories(self.categories)}\n"
"var_group_labels: "
f"{_format_first_three_categories(self.var_group_labels)}"
)
if var_names_idx_ordered is not None:
var_names_ordered = [self.var_names[x] for x in var_names_idx_ordered]
else:
var_names_ordered = None
self.categories_idx_ordered = categories_idx_ordered
self.categories_order = dendro_info['categories_ordered']
self.var_names_idx_order = var_names_idx_ordered
self.var_names_ordered = var_names_ordered
@staticmethod
def _plot_var_groups_brackets(
gene_groups_ax: Axes,
group_positions: Iterable[Tuple[int, int]],
group_labels: Sequence[str],
left_adjustment: float = -0.3,
right_adjustment: float = 0.3,
rotation: Optional[float] = None,
orientation: Literal['top', 'right'] = 'top',
):
"""\
        Draws brackets that represent groups of genes on the given axis.
For best results, this axis is located on top of an image whose
x axis contains gene names.
The gene_groups_ax should share the x axis with the main ax.
Eg: gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=dot_ax)
Parameters
----------
gene_groups_ax
In this axis the gene marks are drawn
group_positions
Each item in the list, should contain the start and end position that the
bracket should cover.
Eg. [(0, 4), (5, 8)] means that there are two brackets, one for the var_names (eg genes)
in positions 0-4 and other for positions 5-8
group_labels
List of group labels
left_adjustment
adjustment to plot the bracket start slightly before or after the first gene position.
If the value is negative the start is moved before.
right_adjustment
adjustment to plot the bracket end slightly before or after the last gene position
            If the value is negative the end is moved before.
rotation
rotation degrees for the labels. If not given, small labels (<4 characters) are not
rotated, otherwise, they are rotated 90 degrees
orientation
location of the brackets. Either `top` or `right`
Returns
-------
None
"""
import matplotlib.patches as patches
from matplotlib.path import Path
# get the 'brackets' coordinates as lists of start and end positions
left = [x[0] + left_adjustment for x in group_positions]
right = [x[1] + right_adjustment for x in group_positions]
# verts and codes are used by PathPatch to make the brackets
verts = []
codes = []
if orientation == 'top':
# rotate labels if any of them is longer than 4 characters
if rotation is None and group_labels:
if max([len(x) for x in group_labels]) > 4:
rotation = 90
else:
rotation = 0
for idx, (left_coor, right_coor) in enumerate(zip(left, right)):
verts.append((left_coor, 0)) # lower-left
verts.append((left_coor, 0.6)) # upper-left
verts.append((right_coor, 0.6)) # upper-right
verts.append((right_coor, 0)) # lower-right
codes.append(Path.MOVETO)
codes.append(Path.LINETO)
codes.append(Path.LINETO)
codes.append(Path.LINETO)
group_x_center = left[idx] + float(right[idx] - left[idx]) / 2
gene_groups_ax.text(
group_x_center,
1.1,
group_labels[idx],
ha='center',
va='bottom',
rotation=rotation,
)
else:
top = left
bottom = right
for idx, (top_coor, bottom_coor) in enumerate(zip(top, bottom)):
verts.append((0, top_coor)) # upper-left
verts.append((0.4, top_coor)) # upper-right
verts.append((0.4, bottom_coor)) # lower-right
verts.append((0, bottom_coor)) # lower-left
codes.append(Path.MOVETO)
codes.append(Path.LINETO)
codes.append(Path.LINETO)
codes.append(Path.LINETO)
diff = bottom[idx] - top[idx]
group_y_center = top[idx] + float(diff) / 2
if diff * 2 < len(group_labels[idx]):
# cut label to fit available space
group_labels[idx] = group_labels[idx][: int(diff * 2)] + "."
gene_groups_ax.text(
1.1,
group_y_center,
group_labels[idx],
ha='right',
va='center',
rotation=270,
fontsize='small',
)
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='none', lw=1.5)
gene_groups_ax.add_patch(patch)
gene_groups_ax.grid(False)
gene_groups_ax.axis('off')
# remove y ticks
gene_groups_ax.tick_params(axis='y', left=False, labelleft=False)
# remove x ticks and labels
gene_groups_ax.tick_params(
axis='x', bottom=False, labelbottom=False, labeltop=False
)
def _update_var_groups(self):
"""
        Checks if var_names is a dict. If this is the case, sets the
        correct values for var_group_labels and var_group_positions.
        Updates var_names, var_group_labels and var_group_positions.
Returns
-------
None
"""
if isinstance(self.var_names, cabc.Mapping):
if self.has_var_groups:
logg.warning(
"`var_names` is a dictionary. This will reset the current "
"values of `var_group_labels` and `var_group_positions`."
)
var_group_labels = []
_var_names = []
var_group_positions = []
start = 0
for label, vars_list in self.var_names.items():
if isinstance(vars_list, str):
vars_list = [vars_list]
# use list() in case var_list is a numpy array or pandas series
_var_names.extend(list(vars_list))
var_group_labels.append(label)
var_group_positions.append((start, start + len(vars_list) - 1))
start += len(vars_list)
self.var_names = _var_names
self.var_group_labels = var_group_labels
self.var_group_positions = var_group_positions
self.has_var_groups = True
elif isinstance(self.var_names, str):
self.var_names = [self.var_names]
| bsd-3-clause |
rajat1994/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
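# Side note (illustrative, not part of the original example): PolynomialFeatures
# builds exactly the pseudo-feature (Vandermonde-style) matrix described above.
# For a single point x = 2 and degree 3 it returns [1, 2, 4, 8], i.e.
# [1, x, x**2, x**3]:
#     PolynomialFeatures(3).fit_transform(np.array([[2.0]]))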
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
opcon/plutokore | scripts/generate-plots-for-marissa.py | 2 | 17219 | #!/usr/bin/env python3
""" Generate some simple plots from simulations
This script generates a few simple plots from the given simulation.
The goal is to highlight both the 1st and 2nd outburst in a 4-outburst
simulation.
The plots generated are:
* Density (full-plane reflected)
* Tracers (full-plane reflected)
* Surface brightness (full-plane reflected)
Changes:
* Initial version (Patrick, 27.10.2018)
"""
import os
import sys
if os.path.exists(os.path.expanduser('~/plutokore')):
sys.path.append(os.path.expanduser('~/plutokore'))
else:
sys.path.append(os.path.expanduser('~/uni/plutokore'))
import plutokore as pk
import matplotlib as mpl
mpl.use('PS')
import matplotlib.pyplot as plot
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
import argparse
from plutokore import radio
from numba import jit
from astropy.convolution import convolve, Gaussian2DKernel
import astropy.units as u
from astropy.cosmology import Planck15 as cosmo
from astropy.table import QTable
import pathlib
import h5py
import code
def create_plots(*, sim_dir, plot_dir, output_number, sim_info, observing_properties, plot_properties):
"""
This function creates the three plots we want
"""
# load the simulation information
uv = sim_info['uv']
env = sim_info['env']
jet = sim_info['jet']
# load simulation
sim_data = pk.simulations.load_timestep_data(output_number, sim_dir, mmap=True)
sim_data.x2r[-1] = np.pi
rr, tt = np.meshgrid(sim_data.x1r, sim_data.x2r)
x = rr * np.cos(tt)
y = rr * np.sin(tt)
rmesh, tmesh = np.meshgrid(sim_data.x1, sim_data.x2)
# x, y = pk.simulations.sphericaltocartesian(sim_data, rotation=plot_properties['rotation'])
x = x * uv.length
y = y * uv.length
if plot_properties['plot_in_arcsec']:
x = (x * observing_properties['kpc2arcsec']).to(u.arcsec)
y = (y * observing_properties['kpc2arcsec']).to(u.arcsec)
# let's check if this simulation is quarter-plane or half-plane (2D)
if (sim_data.geometry == 'SPHERICAL') and (len(sim_data.nshp) == 2):
pass
else:
quit('Unsupported geometry and/or dimensions')
is_quarter_plane = (sim_data.x2[-1] - sim_data.x2[0]) < (3.0*np.pi / 4.0)
# plot density
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = plot_properties, observing_properties = observing_properties)
rho = sim_data.rho * uv.density.to(u.kg / u.m ** 3).value
im = a.pcolormesh(x, y, np.log10(rho.T), vmin=-27, vmax=-23, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(x, -y, np.log10(rho.T), vmin=-27, vmax=-23, rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
im = a.pcolormesh(-x, y, np.log10(rho.T), vmin=-27, vmax=-23, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(-x, -y, np.log10(rho.T), vmin=-27, vmax=-23, rasterized = True, edgecolors = 'none', shading = 'flat')
cb = f.colorbar(im)
cb.set_label('Density [log10 kg cm^-3]')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'density_{output_number:02d}.png'),
)
plot.close(f)
# plot pressure
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = plot_properties, observing_properties = observing_properties)
prs = sim_data.prs * uv.pressure.to(u.Pa).value
im = a.pcolormesh(x, y, np.log10(prs.T), vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(x, -y, np.log10(prs.T), vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
im = a.pcolormesh(-x, -y, np.log10(prs.T), vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(-x, y, np.log10(prs.T), vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
cb = f.colorbar(im)
cb.set_label('Pressure [log10 Pa]')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'pressure_{output_number:02d}.png'),
)
plot.close(f)
# plot jet velocity
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = plot_properties, observing_properties = observing_properties)
vx = (sim_data.vx1 * (np.sin(tmesh.T)) + rmesh.T * sim_data.vx2 * (np.cos(tmesh.T))) * uv.speed.to(u.km / u.s).value
vx = sim_data.vx1 * uv.speed.to(u.km / u.s).value
# import ipdb; ipdb.set_trace()
im = a.pcolormesh(x, y, vx.T, vmin=None, vmax=None, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(x, -y, vx.T, vmin=None, vmax=None, rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
        im = a.pcolormesh(-x, -y, vx.T, vmin=None, vmax=None, rasterized = True, edgecolors = 'none', shading = 'flat')
        im = a.pcolormesh(-x, y, vx.T, vmin=None, vmax=None, rasterized = True, edgecolors = 'none', shading = 'flat')
cb = f.colorbar(im)
cb.set_label('Velocity [km s^-1]')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'velocity_{output_number:02d}.png'),
)
plot.close(f)
# plot tracer
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = plot_properties, observing_properties = observing_properties)
tracer_count = pk.simulations.get_tracer_count_data(sim_data)
tr1 = sim_data.tr1
im1 = a.pcolormesh(x, y, tr1.T, vmin=0, vmax=1, cmap='Blues_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
im1 = a.pcolormesh(x, -y, tr1.T, vmin=0, vmax=1, cmap='Blues_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
im1 = a.pcolormesh(-x, -y, tr1.T, vmin=0, vmax=1, cmap='Blues_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
im1 = a.pcolormesh(-x, y, tr1.T, vmin=0, vmax=1, cmap='Blues_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
# only plot second tracer if we have more than one!
if tracer_count > 1:
tr2 = sim_data.tr2
im1 = a.pcolormesh(x, y, tr2.T, vmin=0, vmax=1, cmap='Reds_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
im1 = a.pcolormesh(x, -y, tr2.T, vmin=0, vmax=1, cmap='Reds_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
im1 = a.pcolormesh(-x, -y, tr2.T, vmin=0, vmax=1, cmap='Reds_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
im1 = a.pcolormesh(-x, y, tr2.T, vmin=0, vmax=1, cmap='Reds_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'tracers_{output_number:02d}.png'),
)
plot.close(f)
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = {**plot_properties, 'plot_in_arcsec': True}, observing_properties = observing_properties)
(X, Y, sb) = calculate_surface_brightness(
sim_data = sim_data,
uv = uv,
observing_properties = observing_properties,
is_quarter_plane = is_quarter_plane,
do_convolve = True,
)
im = a.pcolormesh(Y, X, np.log10(sb.value), vmin=-3, vmax=2, rasterized = True, edgecolors = 'face', shading = 'flat')
im = a.pcolormesh(Y, -X, np.log10(sb.value), vmin=-3, vmax=2, rasterized = True, edgecolors = 'face', shading = 'flat')
if is_quarter_plane:
im = a.pcolormesh(-Y, X, np.log10(sb.value), vmin=-3, vmax=2, rasterized = True, edgecolors = 'face', shading = 'flat')
im = a.pcolormesh(-Y, -X, np.log10(sb.value), vmin=-3, vmax=2, rasterized = True, edgecolors = 'face', shading = 'flat')
cb = f.colorbar(im)
cb.set_label('Surface Brightness [log10 mJy beam^-1]')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'sb_{output_number:02d}.png'),
)
plot.close(f)
def setup_figure(*, sim_time, plot_properties, observing_properties):
fig,ax = plot.subplots(figsize=(10,5))
ax.set_xlim(observing_properties['xlim'].value)
ax.set_ylim(observing_properties['ylim'].value)
if plot_properties['plot_in_arcsec']:
ax.set_xlabel('X ["]')
ax.set_ylabel('Y ["]')
else:
ax.set_xlabel('X [kpc]')
ax.set_ylabel('Y [kpc]')
ax.set_title(f'{sim_time:0.02f}')
ax.set_aspect('equal')
return fig,ax
def save_figure(*, fig, ax, cbx, plot_properties, fig_path):
if plot_properties['fluff'] is False:
if cbx.ax in fig.axes:
fig.delaxes(cbx.ax)
ax.set_title('')
ax.set_position([0, 0, 1, 1])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
ax.set_axis_off()
fig.savefig(fig_path, dpi=plot_properties['dpi'], bbox_inches='tight')
@jit(nopython=True, cache=True)
def raytrace_surface_brightness(r, theta, x, y, z, raytraced_values, original_values):
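    """Project the axisymmetric (r, theta) emissivity ``original_values`` onto
    the (x, z) sky grid ``raytraced_values`` by summing, along the line of
    sight y, the emissivity weighted by the azimuthal angle subtended by each
    volume element. Modifies ``raytraced_values`` in place."""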
phi = 0
rmax = np.max(r)
thetamax = np.max(theta)
x_half_step = (x[1] - x[0]) * 0.5
pi2_recip = (1 / (2 * np.pi))
visited = np.zeros(original_values.shape)
for x_index in range(len(x)):
for z_index in range(len(z)):
visited[:,:] = 0
for y_index in range(len(y)):
# Calculate the coordinates of this point
ri = np.sqrt(x[x_index] **2 + y[y_index] ** 2 + z[z_index] ** 2)
if ri == 0:
continue
if ri > rmax:
continue
thetai = np.arccos(z[z_index] / ri)
if thetai > thetamax:
continue
phii = 0 # Don't care about phii!!
chord_length = np.abs(np.arctan2(y[y_index], x[x_index] + x_half_step) - np.arctan2(y[y_index], x[x_index] - x_half_step))
# Now find index in r and theta arrays corresponding to this point
r_index = np.argmax(r>ri)
theta_index = np.argmax(theta>thetai)
# Only add this if we have not already visited this cell (twice)
if visited[r_index, theta_index] <= 1:
raytraced_values[x_index, z_index] += original_values[r_index, theta_index] * chord_length * pi2_recip
visited[r_index, theta_index] += 1
#return raytraced_values
return
def calculate_surface_brightness(*, sim_data, uv, observing_properties, do_convolve, is_quarter_plane):
xlim = observing_properties['ylim']
ylim = observing_properties['xlim']
# calculate beam radius
sigma_beam = (observing_properties['beamwidth'] / 2.355)
# calculate kpc per arcsec
kpc_per_arcsec = cosmo.kpc_proper_per_arcmin(observing_properties['redshift']).to(u.kpc / u.arcsec)
# load timestep data file
d = sim_data
# calculate luminosity and unraytraced flux
l = radio.get_luminosity(d, uv, observing_properties['redshift'], observing_properties['beamwidth'])
f = radio.get_flux_density(l, observing_properties['redshift']).to(u.Jy).value
# calculate raytracing grid
xmax = ((xlim[1] + observing_properties['pixelsize'] * kpc_per_arcsec) / uv.length).si
zmax = ((ylim[1] + observing_properties['pixelsize'] * kpc_per_arcsec) / uv.length).si
if not is_quarter_plane:
xmin = ((xlim[0] - observing_properties['pixelsize'] * kpc_per_arcsec) / uv.length).si
zmin = ((ylim[0] - observing_properties['pixelsize'] * kpc_per_arcsec) / uv.length).si
xstep = (observing_properties['pixelsize'] * kpc_per_arcsec / uv.length).si
zstep = (observing_properties['pixelsize'] * kpc_per_arcsec / uv.length).si
ymax = max(xmax, zmax)
ystep = min(xstep, zstep)
# ystep = ((0.25 * u.kpc) / uv.length).si
if is_quarter_plane:
x = np.arange(0, xmax, xstep)
z = np.arange(0, zmax, zstep)
else:
x = np.arange(0, xmax, xstep)
z = np.arange(zmin, zmax, zstep)
y = np.arange(-ymax, ymax, ystep)
raytraced_flux = np.zeros((x.shape[0], z.shape[0]))
# raytrace surface brightness
raytrace_surface_brightness(
r=d.x1,
theta=d.x2,
x=x,
y=y,
z=z,
original_values=f,
raytraced_values=raytraced_flux
)
raytraced_flux = raytraced_flux * u.Jy
# beam information
area_beam_kpc2 = (np.pi * (sigma_beam * kpc_per_arcsec)
**2).to(u.kpc**2)
beams_per_cell = (((observing_properties['pixelsize'] * kpc_per_arcsec) ** 2) / area_beam_kpc2).si
raytraced_flux /= beams_per_cell
beam_kernel = Gaussian2DKernel(sigma_beam.value)
if do_convolve:
flux = convolve(raytraced_flux.to(u.Jy), beam_kernel, boundary='extend') * u.Jy
else:
flux = raytraced_flux
X1 = x * (uv.length / kpc_per_arcsec).to(u.arcsec).value
X2 = z * (uv.length / kpc_per_arcsec).to(u.arcsec).value
return (X1, X2, flux.to(u.mJy))
def create_alpha_colormap(*, name):
ncolors = 256
color_array = plot.get_cmap(name)(range(ncolors))
color_array[:, -1] = np.linspace(0.0, 1.0, ncolors)
map_object = LinearSegmentedColormap.from_list(name=f'{name}_alpha', colors=color_array)
plot.register_cmap(cmap=map_object)
def main():
parser = argparse.ArgumentParser(formatter_class = argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('simulation_directory', help='Simulation directory', type=str)
parser.add_argument('output_directory', help='Output directory', type=str)
parser.add_argument('outputs', help='Output numbers', type=int, nargs='+')
parser.add_argument('--trc_cutoff', help='Tracer cutoff', type=float, default=1e-14)
parser.add_argument('--redshift', help='Redshift value', type=float, default=0.05)
parser.add_argument('--beamwidth', help='Observing beam width [arcsec]', type=float, default=5)
parser.add_argument('--pixelsize', help='Observing pixel size [arcsec]', type=float, default=1.8)
parser.add_argument('--xlim', help='X limits [kpc]', type=float, nargs=2, default=[-60,60])
parser.add_argument('--ylim', help='Y limits [kpc]', type=float, nargs=2, default=[-60,60])
parser.add_argument('--plot_in_arcsec', help='Plot axes in arsec')
parser.add_argument('--rotation', help='Rotation of output', type=float, default=np.pi / 2)
parser.add_argument('--dpi', help='DPI to save figure at', type=float, default=300)
parser.add_argument('--no_fluff', help='Save the figure without any axes labels, ticks, or titles', action='store_true')
args = parser.parse_args()
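    # Example invocation (directory names and output numbers are illustrative):
    #   python generate-plots-for-marissa.py /path/to/sim_dir /path/to/plot_dir 10 20 \
    #       --redshift 0.05 --beamwidth 5 --pixelsize 1.8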
# Update observing properties
observing_properties = {
'redshift': args.redshift,
'beamwidth': args.beamwidth * u.arcsec,
'pixelsize': args.pixelsize * u.arcsec,
'xlim': args.xlim * u.kpc,
'ylim': args.ylim * u.kpc,
'kpc2arcsec': 1.0/cosmo.kpc_proper_per_arcmin(args.redshift).to(u.kpc / u.arcsec)
}
# update plot propterties
plot_properties = {
'plot_in_arcsec': args.plot_in_arcsec,
'rotation': args.rotation,
'dpi': args.dpi,
'fluff': not args.no_fluff,
}
# load the simulation information
uv, env, jet = pk.configuration.load_simulation_info(os.path.join(args.simulation_directory, 'config.yaml'))
sim_info = {
'uv': uv,
'env': env,
'jet': jet,
}
print('Generating plots for the following outputs:')
print(args.outputs)
print()
    print('Observing properties are:')
print(f'> r: {observing_properties["redshift"]}, beamwidth: {observing_properties["beamwidth"]}, pixelsize: {observing_properties["pixelsize"]}')
print(f'> xlim: {observing_properties["xlim"]}, ylim: {observing_properties["ylim"]}')
print()
print('The environment and jet properties are:')
print(f'> Environment: {type(env).__name__}, halo mass = {np.log10(env.halo_mass.value)}, central density = {env.central_density}')
print(f'> Jet: power = {jet.Q}, density = {jet.rho_0}, mach number = {jet.M_x}, half-opening angle = {np.rad2deg(jet.theta)}')
print()
# create output directory if needed
pathlib.Path(args.output_directory).mkdir(parents = True, exist_ok = True)
# Let's generate our custom colormaps
create_alpha_colormap(name='Blues')
create_alpha_colormap(name='Reds')
for i in args.outputs:
create_plots(
sim_dir = args.simulation_directory,
plot_dir = args.output_directory,
output_number = i,
sim_info = sim_info,
observing_properties = observing_properties,
plot_properties = plot_properties,
)
if __name__ == '__main__':
main()
# -*- coding: utf-8 -*-
| mit |
bobbymckinney/seebeck_measurement | old versions/SeebeckGUIv7.py | 2 | 108860 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Created: 2015-08-19
@author: Bobby McKinney ([email protected])
__Title__ : voltagepanel
Description:
Comments:
"""
import os
import sys
import wx
from wx.lib.pubsub import pub # For communicating b/w the thread and the GUI
import wx.lib.scrolledpanel as scrolled
import matplotlib
matplotlib.interactive(False)
matplotlib.use('WXAgg') # The recommended way to use wx with mpl is with WXAgg backend.
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
from matplotlib.pyplot import gcf, setp
import matplotlib.animation as animation # For plotting
import pylab
import numpy as np
import matplotlib.pyplot as plt
import minimalmodbus as modbus # For communicating with the cn7500s
import omegacn7500 # Driver for cn7500s under minimalmodbus, adds a few easy commands
import visa # pyvisa, essential for communicating with the Keithley
from threading import Thread # For threading the processes going on behind the GUI
import time
from datetime import datetime # for getting the current date and time
# Modules for saving logs of exceptions
import exceptions
import sys
from logging_utils import setup_logging_to_file, log_exception
# for a fancy status bar:
import EnhancedStatusBar as ESB
#==============================================================================
# Keeps Windows from complaining that the port is already open:
modbus.CLOSE_PORT_AFTER_EACH_CALL = True
version = '7.0 (2015-11-19)'
'''
Global Variables:
'''
# Naming a data file:
dataFile = 'Data.csv'
statusFile = 'Status.csv'
seebeckFile = 'Seebeck.csv'
APP_EXIT = 1 # id for File\Quit
stability_threshold = 0.25/60
oscillation = 8 # Degree range that the PID will oscillate in
tolerance = (oscillation/5) # This must be set to less than oscillation
measureList = []
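# dTlist appears to hold the nominal temperature-difference setpoints (in C)
# stepped through during each Seebeck measurement oscillation; the
# commented-out list below is an alternative, finer sweep.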
#dTlist = [0,-2,-4,-6,-8,-6,-4,-2,0,2,4,6,8,6,4,2,0]
dTlist = [0,-4,-8,-4,0,4,8,4,0]
maxLimit = 650 # Restricts the user to a max temperature
abort_ID = 0 # Abort method
# Global placers for instruments
k2700 = ''
sampleApid = ''
sampleBpid = ''
blockApid = ''
blockBpid = ''
tc_type = "k-type" # Set the thermocouple type in order to use the correct voltage correction
# Channels corresponding to switch card:
#tempAChannel = '109'
#tempBChannel = '110'
chromelChannel = '107'
alumelChannel = '108'
# placer for directory
filePath = 'global file path'
# placer for files to be created
myfile = 'global file'
rawfile = 'global file'
processfile = 'global file'
# Placers for the GUI plots:
chromelV_list = []
tchromelV_list = []
alumelV_list=[]
talumelV_list = []
sampletempA_list = []
tsampletempA_list = []
sampletempB_list = []
tsampletempB_list = []
tblocktemp_list = []
blocktempA_list = []
blocktempB_list = []
timecalclist = []
Vchromelcalclist = []
Valumelcalclist = []
dTcalclist = []
avgTcalclist = []
#ResourceManager for visa instrument control
ResourceManager = visa.ResourceManager()
###############################################################################
class Keithley_2700:
''' Used for the matrix card operations. '''
#--------------------------------------------------------------------------
def __init__(self, instr):
self.ctrl = ResourceManager.open_resource(instr)
#end init
#--------------------------------------------------------------------------
def fetch(self, channel):
"""
Scan the channel and take a reading
"""
measure = False
while (not measure):
try:
self.ctrl.write(":ROUTe:SCAN:INTernal (@ %s)" % (channel)) # Specify Channel
#keithley.write(":SENSe1:FUNCtion 'TEMPerature'") # Specify Data type
self.ctrl.write(":ROUTe:SCAN:LSELect INTernal") # Scan Selected Channel
time.sleep(.1)
self.ctrl.write(":ROUTe:SCAN:LSELect NONE") # Stop Scan
time.sleep(.1)
data = self.ctrl.query(":FETCh?")
time.sleep(.1)
data = float(str(data)[0:15])
measure = True
except exceptions.ValueError as VE:
print(VE)
measure = False
#end while
return data # Fetches Reading
#end def
#--------------------------------------------------------------------------
def openAllChannels(self):
self.ctrl.write("ROUTe:OPEN:ALL")
#end def
#end class
###############################################################################
###############################################################################
class PID(omegacn7500.OmegaCN7500):
#--------------------------------------------------------------------------
def __init__(self, portname, slaveaddress):
omegacn7500.OmegaCN7500.__init__(self, portname, slaveaddress)
#end init
#--------------------------------------------------------------------------
# Commands for easy reference:
# Use .write_register(command, value) and .read_register(command)
# All register values can be found in the Manual or Instruction Sheet.
# You must convert each address from Hex to Decimal.
control = 4101 # Register for control method
pIDcontrol = 0 # Value for PID control method
pIDparam = 4124 # Register for PID parameter selection
pIDparam_Auto = 4 # Value for Auto PID
tCouple = 4100 # Register for setting the temperature sensor type
tCouple_K = 0 # K type thermocouple
heatingCoolingControl = 4102 # Register for Heating/Cooling control selection
heating = 0 # Value for Heating setting
#end class
###############################################################################
###############################################################################
class Setup:
"""
Call this class to run the setup for the Keithley and the PID.
"""
def __init__(self):
"""
Prepare the Keithley to take data on the specified channels:
"""
global k2700
global sampleApid
global sampleBpid
global blockApid
global blockBpid
# Define Keithley instrument port:
self.k2700 = k2700 = Keithley_2700('GPIB0::1::INSTR')
# Define the ports for the PID
self.sampleApid = sampleApid = PID('/dev/cu.usbserial', 1) # Top heater
self.sampleBpid = sampleBpid = PID('/dev/cu.usbserial', 2) # Bottom heater
self.blockApid = blockApid = PID('/dev/cu.usbserial', 3) # Top block
        self.blockBpid = blockBpid = PID('/dev/cu.usbserial', 4) # Bottom block
"""
Prepare the Keithley for operation:
"""
self.k2700.openAllChannels
# Define the type of measurement for the channels we are looking at:
#self.k2700.ctrl.write(":SENSe1:TEMPerature:TCouple:TYPE K") # Set ThermoCouple type
#self.k2700.ctrl.write(":SENSe1:FUNCtion 'TEMPerature', (@ 109,110)")
self.k2700.ctrl.write(":SENSe1:FUNCtion 'VOLTage:DC', (@ 107,108)")
self.k2700.ctrl.write(":TRIGger:SEQuence1:DELay 0")
self.k2700.ctrl.write(":TRIGger:SEQuence1:COUNt 1") # Set the count rate
# Sets the the acquisition rate of the measurements
self.k2700.ctrl.write(":SENSe1:VOLTage:DC:NPLCycles 4, (@ 107,108)") # Sets integration period based on frequency
#self.k2700.ctrl.write(":SENSe1:TEMPerature:NPLCycles 4, (@ 109,110)")
"""
Prepare the PID for operation:
"""
# Set the control method to PID
self.sampleApid.write_register(PID.control, PID.pIDcontrol)
self.sampleBpid.write_register(PID.control, PID.pIDcontrol)
# Set the PID to auto parameter
self.sampleApid.write_register(PID.pIDparam, PID.pIDparam_Auto)
self.sampleBpid.write_register(PID.pIDparam, PID.pIDparam_Auto)
# Set the thermocouple type
self.sampleApid.write_register(PID.tCouple, PID.tCouple_K)
self.sampleBpid.write_register(PID.tCouple, PID.tCouple_K)
self.blockApid.write_register(PID.tCouple, PID.tCouple_K)
self.blockBpid.write_register(PID.tCouple, PID.tCouple_K)
# Set the control to heating only
self.sampleApid.write_register(PID.heatingCoolingControl, PID.heating)
self.sampleBpid.write_register(PID.heatingCoolingControl, PID.heating)
# Run the controllers
self.sampleApid.run()
self.sampleBpid.run()
#end class
###############################################################################
###############################################################################
class ProcessThread(Thread):
"""
Thread that runs the operations behind the GUI. This includes measuring
and plotting.
"""
#--------------------------------------------------------------------------
def __init__(self):
""" Init Worker Thread Class """
Thread.__init__(self)
self.start()
#end init
#--------------------------------------------------------------------------
def run(self):
""" Run Worker Thread """
#Setup()
td=TakeData()
#td = TakeDataTest()
#end def
#end class
###############################################################################
###############################################################################
class InitialCheck:
"""
Initial check of temperatures and voltages.
"""
#--------------------------------------------------------------------------
def __init__(self):
self.k2700 = k2700
self.sampleApid = sampleApid
self.sampleBpid = sampleBpid
self.blockApid = blockApid
self.blockBpid = blockBpid
self.take_temperature_Data()
self.take_voltage_Data()
#end init
#--------------------------------------------------------------------------
def take_temperature_Data(self):
""" Takes data from the PID
"""
# Take Data and time stamps:
self.sampletempA = float(self.sampleApid.get_pv())
time.sleep(0.1)
self.sampletempB = float(self.sampleBpid.get_pv())
time.sleep(0.1)
self.blocktempA = float(self.blockApid.get_pv())
time.sleep(0.1)
self.blocktempB = float(self.blockBpid.get_pv())
time.sleep(0.1)
self.samplesetpointA = float(self.sampleApid.get_setpoint())
time.sleep(0.1)
self.samplesetpointB = float(self.sampleBpid.get_setpoint())
time.sleep(0.1)
self.updateGUI(stamp="Sample Temp A Init", data=self.sampletempA)
self.updateGUI(stamp="Sample Temp B Init", data=self.sampletempB)
self.updateGUI(stamp="Setpoint A Init", data=self.samplesetpointA)
self.updateGUI(stamp="Setpoint B Init", data=self.samplesetpointB)
self.updateGUI(stamp="Block Temp A Init", data=self.blocktempA)
self.updateGUI(stamp="Block Temp B Init", data=self.blocktempB)
print "\nsample temp A: %f C\nblock temp A: %f C\nsample temp B: %f C\nblock temp B: %f C" % (self.sampletempA, self.blocktempA, self.sampletempB, self.blocktempB)
#end def
#--------------------------------------------------------------------------
def take_voltage_Data(self):
""" Takes data from the PID
"""
self.Vchromelraw = float(self.k2700.fetch(chromelChannel))*10**6
self.Vchromelcalc = self.voltage_Correction(self.Vchromelraw,self.sampletempA,self.sampletempB, 'chromel')
self.Valumelraw = float(self.k2700.fetch(alumelChannel))*10**6
self.Valumelcalc = self.voltage_Correction(self.Valumelraw,self.sampletempA,self.sampletempB, 'alumel')
self.updateGUI(stamp="Chromel Voltage Init", data=float(self.Vchromelcalc))
self.updateGUI(stamp="Alumel Voltage Init", data=float(self.Valumelcalc))
print "\nvoltage (Chromel): %f uV\nvoltage (Alumel): %f uV" % (self.Vchromelcalc, self.Valumelcalc)
#end def
#--------------------------------------------------------------------------
def voltage_Correction(self, raw_voltage, tempA, tempB, side):
''' raw_data must be in uV '''
# Kelvin conversion for polynomial correction.
dT = tempA - tempB
avgT = (tempA + tempB)/2 + 273.15
# Correction for effect from Thermocouple Seebeck
out = self.alpha(avgT, side)*dT - raw_voltage
return out
#end def
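# In other words: V_corrected = alpha(avgT, side)*(tempA - tempB) - V_raw, with
# avgT converted to Kelvin and all voltages in uV, which (per the comment above)
# removes the Seebeck contribution of the thermocouple leg from the raw reading.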
#--------------------------------------------------------------------------
def alpha(self, x, side):
''' x = avgT
alpha in uV/K
'''
global tc_type
if tc_type == "k-type":
### If Chromel, taken from Chromel_Seebeck.txt
if side == 'chromel':
if ( x >= 270 and x < 700):
alpha = -2467.61114613*x**0 + 55.6028987953*x**1 + \
-0.552110359087*x**2 + 0.00320554346691*x**3 + \
-1.20477254034e-05*x**4 + 3.06344710205e-08*x**5 + \
-5.33914758601e-11*x**6 + 6.30044607727e-14*x**7 + \
-4.8197269477e-17*x**8 + 2.15928374212e-20*x**9 + \
-4.30421084091e-24*x**10
#end if
elif ( x >= 700 and x < 1599):
alpha = 1165.13254764*x**0 + -9.49622421414*x**1 + \
0.0346344390853*x**2 + -7.27785048931e-05*x**3 + \
9.73981855547e-08*x**4 + -8.64369652227e-11*x**5 + \
5.10080771762e-14*x**6 + -1.93318725171e-17*x**7 + \
4.27299905603e-21*x**8 + -4.19761748937e-25*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Chromel)
### If Alumel, taken from Alumel_Seebeck.txt
elif side == 'alumel':
if ( x >= 270 and x < 570):
alpha = -3465.28789643*x**0 + 97.4007289124*x**1 + \
-1.17546754681*x**2 + 0.00801252041119*x**3 + \
-3.41263237031e-05*x**4 + 9.4391002358e-08*x**5 + \
-1.69831949233e-10*x**6 + 1.91977765586e-13*x**7 + \
-1.2391854625e-16*x**8 + 3.48576207577e-20*x**9
#end if
elif ( x >= 570 and x < 1599):
alpha = 254.644633774*x**0 + -2.17639940109*x**1 + \
0.00747127856327*x**2 + -1.41920634198e-05*x**3 + \
1.61971537881e-08*x**4 + -1.14428153299e-11*x**5 + \
4.969263632e-15*x**6 + -1.27526741699e-18*x**7 + \
1.80403838088e-22*x**8 + -1.23699936952e-26*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Alumel)
else:
print "Error in voltage correction."
#end if (K-type)
return alpha
#end def
#--------------------------------------------------------------------------
def updateGUI(self, stamp, data):
"""
Sends data to the GUI (main thread), for live updating while the process is running
in another thread.
"""
time.sleep(0.1)
wx.CallAfter(pub.sendMessage, stamp, msg=data)
#end def
#end class
###############################################################################
###############################################################################
class TakeData:
''' Takes measurements and saves them to file. '''
#--------------------------------------------------------------------------
def __init__(self):
global abort_ID
global k2700
global sampleApid
global sampleBpid
global blockApid
global blockBpid
global tolerance
global stability_threshold
global oscillation
global measureList
global dTlist
global timecalclist, Vchromelcalclist, Valumelcalclist, dTcalclist, avgTcalclist
self.k2700 = k2700
self.sampleApid = sampleApid
self.sampleBpid = sampleBpid
self.blockApid = blockApid
self.blockBpid = blockBpid
self.tolerance = tolerance
self.stability_threshold = stability_threshold
self.tol = 'NO'
self.stable = 'NO'
self.measurement = 'OFF'
self.measurement_indicator = 'none'
self.updateGUI(stamp='Measurement', data=self.measurement)
self.plotnumber = 0
self.exception_ID = 0
self.updateGUI(stamp='Status Bar', data='Running')
self.start = time.time()
print "start take data"
try:
while abort_ID == 0:
for avgtemp in measureList:
self.avgtemp = avgtemp
self.dT = 0
print "Set avg temp to %f" %(self.avgtemp)
print "set sample A temp to %f" %(self.avgtemp)
while True:
try:
self.sampleApid.set_setpoint(self.avgtemp)
break
except IOError:
print 'IOError: communication failure'
#end while
print "set sample B temp to %f" %(self.avgtemp)
while True:
try:
self.sampleBpid.set_setpoint(self.avgtemp)
break
except IOError:
print 'IOError: communication failure'
#end while
self.plotnumber +=1
timecalclist = []
Vchromelcalclist = []
Valumelcalclist = []
dTcalclist = []
avgTcalclist = []
self.recenttempA = []
self.recenttempAtime=[]
self.recenttempB = []
self.recenttempBtime=[]
self.stabilityA = '-'
self.stabilityB = '-'
self.updateGUI(stamp="Stability A", data=self.stabilityA)
self.updateGUI(stamp="Stability B", data=self.stabilityB)
self.take_temperature_Data()
self.take_voltage_Data()
self.check_tolerance()
condition = False
print 'start tolerance and stability loop'
while (not condition):
self.take_temperature_Data()
self.take_voltage_Data()
self.check_tolerance()
if abort_ID == 1: break
condition = (self.tol == 'OK' and self.stable == 'OK')
#end while
if abort_ID == 1: break
# vary dT
self.measurement_indicator = 'start'
for dT in dTlist:
self.dT = dT
print "Set dT to %f" %(self.dT)
# ramp to correct dT
print 'set sample pid A to %f' %(self.avgtemp+self.dT/2.0)
while True:
try:
self.sampleApid.set_setpoint(self.avgtemp+self.dT/2.0)
break
except IOError:
print 'IOError: communication failure'
#end while
print 'set sample pid B to %f' %(self.avgtemp-self.dT/2.0)
while True:
try:
self.sampleBpid.set_setpoint(self.avgtemp-self.dT/2.0)
break
except IOError:
print 'IOError: communication failure'
#end while
print 'reset stability'
self.recenttempA = []
self.recenttempAtime=[]
self.recenttempB = []
self.recenttempBtime=[]
self.stabilityA = '-'
self.stabilityB = '-'
self.updateGUI(stamp="Stability A", data=self.stabilityA)
self.updateGUI(stamp="Stability B", data=self.stabilityB)
condition = False
print 'start tolerance and stability loop'
while (not condition):
self.take_temperature_Data()
self.take_voltage_Data()
self.check_tolerance()
if abort_ID == 1: break
condition = (self.tol == 'OK' and self.stable == 'OK')
#end while
if abort_ID == 1: break
# start measurement
print 'begin seebeck measurement'
self.measurement = 'ON'
self.updateGUI(stamp='Measurement', data=self.measurement)
for i in range(4):
self.data_measurement()
if (self.dT == dTlist[-1] and i == 3):
self.measurement_indicator = 'stop'
self.write_data_to_file()
if abort_ID == 1: break
#end for
print 'end seebeck measurement'
self.measurement = 'OFF'
self.tol = 'NO'
self.stable = 'NO'
self.updateGUI(stamp='Measurement', data=self.measurement)
if abort_ID == 1: break
#end for
print 'process seebeck data'
self.process_data()
if abort_ID == 1: break
#end for
print 'huzzah! program finished'
abort_ID = 1
#end while
#end try
except exceptions.Exception as e:
log_exception(e)
abort_ID = 1
self.exception_ID = 1
print "Error Occurred, check error_log.log"
print e
#end except
if self.exception_ID == 1:
self.updateGUI(stamp='Status Bar', data='Exception Occurred')
#end if
else:
self.updateGUI(stamp='Status Bar', data='Finished, Ready')
#end else
print 'set sample A temp to 25'
while True:
try:
self.sampleApid.set_setpoint(25)
break
except IOError:
print 'IOError: communication failure'
#end while
print 'set sample B temp to 25'
while True:
try:
self.sampleBpid.set_setpoint(25)
break
except IOError:
print 'IOError: communication failure'
#end while
self.save_files()
wx.CallAfter(pub.sendMessage, 'Enable Buttons')
#end init
#--------------------------------------------------------------------------
def take_temperature_Data(self):
""" Takes data from the PID and proceeds to a
function that checks the PID setpoints.
"""
print 'take temperature data'
try:
# Take Data and time stamps:
self.sampletempA = float(self.sampleApid.get_pv())
time.sleep(0.1)
self.sampletempB = float(self.sampleBpid.get_pv())
time.sleep(0.1)
self.blocktempA = float(self.blockApid.get_pv())
time.sleep(0.1)
self.blocktempB = float(self.blockBpid.get_pv())
time.sleep(0.1)
# Get the current setpoints on the PID:
self.samplesetpointA = float(self.sampleApid.get_setpoint())
time.sleep(0.1)
self.samplesetpointB = float(self.sampleBpid.get_setpoint())
time.sleep(0.1)
except exceptions.ValueError as VE:
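# If any reading raises ValueError (e.g. a garbled serial reply), simply
# repeat the full set of reads once.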
# Take Data and time stamps:
self.sampletempA = float(self.sampleApid.get_pv())
time.sleep(0.1)
self.sampletempB = float(self.sampleBpid.get_pv())
time.sleep(0.1)
self.blocktempA = float(self.blockApid.get_pv())
time.sleep(0.1)
self.blocktempB = float(self.blockBpid.get_pv())
time.sleep(0.1)
# Get the current setpoints on the PID:
self.samplesetpointA = float(self.sampleApid.get_setpoint())
time.sleep(0.1)
self.samplesetpointB = float(self.sampleBpid.get_setpoint())
time.sleep(0.1)
self.time_temperature = time.time() - self.start
print "\ntime: %.2f s\nsample temp A: %f C\nblock temp A: %f C\nsample temp B: %f C\nblock temp B: %f C" % (self.time_temperature, self.sampletempA, self.blocktempA, self.sampletempB, self.blocktempB)
#check stability of PID
if (len(self.recenttempA)<3):
self.recenttempA.append(self.sampletempA)
self.recenttempAtime.append(self.time_temperature)
#end if
else:
self.recenttempA.pop(0)
self.recenttempAtime.pop(0)
self.recenttempA.append(self.sampletempA)
self.recenttempAtime.append(self.time_temperature)
self.stabilityA = self.getStability(self.recenttempA,self.recenttempAtime)
print "stability A: %.4f C/min" % (self.stabilityA*60)
self.updateGUI(stamp="Stability A", data=self.stabilityA*60)
#end else
if (len(self.recenttempB)<3):
self.recenttempB.append(self.sampletempB)
self.recenttempBtime.append(self.time_temperature)
#end if
else:
self.recenttempB.pop(0)
self.recenttempBtime.pop(0)
self.recenttempB.append(self.sampletempB)
self.recenttempBtime.append(self.time_temperature)
self.stabilityB = self.getStability(self.recenttempB,self.recenttempBtime)
print "stability B: %.4f C/min" % (self.stabilityB*60)
self.updateGUI(stamp="Stability B", data=self.stabilityB*60)
#end else
self.updateGUI(stamp="Time Sample Temp A", data=self.time_temperature)
self.updateGUI(stamp="Time Sample Temp B", data=self.time_temperature)
self.updateGUI(stamp="Sample Temp A", data=self.sampletempA)
self.updateGUI(stamp="Sample Temp B", data=self.sampletempB)
self.updateGUI(stamp="Setpoint A", data=self.samplesetpointA)
self.updateGUI(stamp="Setpoint B", data=self.samplesetpointB)
self.updateGUI(stamp="Block Temp A", data=self.blocktempA)
self.updateGUI(stamp="Block Temp B", data=self.blocktempB)
self.updateGUI(stamp="Time Block Temp", data=self.time_temperature)
global rawfile
print('\nwrite temperatures to file\n')
rawfile.write('%.1f,'%(self.time_temperature))
rawfile.write('%.2f,%.2f,%.2f,' %(self.sampletempA,self.samplesetpointA,self.blocktempA))
rawfile.write(str(self.stabilityA)+',')
rawfile.write('%.2f,%.2f,%.2f,' %(self.sampletempB,self.samplesetpointB,self.blocktempB))
rawfile.write(str(self.stabilityB)+',')
self.safety_check()
#end def
#--------------------------------------------------------------------------
def safety_check(self):
global maxLimit
global abort_ID
print 'safety check'
if self.sampletempA > maxLimit:
abort_ID = 1
print 'Safety Failure: Sample Temp A greater than Max Limit'
#end if
if self.sampletempB > maxLimit:
abort_ID = 1
print 'Safety Failure: Sample Temp B greater than Max Limit'
#end if
if self.blocktempA > maxLimit:
abort_ID = 1
print 'Safety Failure: Block Temp A greater than Max Limit'
#end if
if self.blocktempB > maxLimit:
abort_ID = 1
print 'Safety Failure: Block Temp B greater than Max Limit'
#end if
if self.blocktempA > self.sampletempA + 100:
abort_ID = 1
print 'Safety Failure: Block Temp A 100 C greater than Sample Temp A'
#end if
if self.blocktempB > self.sampletempB + 100:
abort_ID = 1
print 'Safety Failure: Block Temp B 100 C greater than Sample Temp B'
#end if
if self.sampletempA > self.blocktempA + 100:
abort_ID = 1
print 'Safety Failure: Sample Temp A 100 C greater than Block Temp A'
#end if
if self.sampletempB > self.blocktempB + 100:
abort_ID = 1
print 'Safety Failure: Sample Temp B 100 C greater than Block Temp B'
#end if
#end def
#--------------------------------------------------------------------------
def take_voltage_Data(self):
print('take voltage data\n')
self.Vchromelraw = float(self.k2700.fetch(chromelChannel))*10**6
self.Vchromelcalc = self.voltage_Correction(self.Vchromelraw,self.sampletempA,self.sampletempB, 'chromel')
self.Valumelraw = float(self.k2700.fetch(alumelChannel))*10**6
self.Valumelcalc = self.voltage_Correction(self.Valumelraw,self.sampletempA,self.sampletempB, 'alumel')
self.time_voltage = time.time() - self.start
self.updateGUI(stamp="Time Chromel Voltage", data=float(self.time_voltage))
self.updateGUI(stamp="Time Alumel Voltage", data=float(self.time_voltage))
self.updateGUI(stamp="Chromel Voltage", data=float(self.Vchromelcalc))
self.updateGUI(stamp="Alumel Voltage", data=float(self.Valumelcalc))
print "\ntime: %f s\nvoltage (Chromel): %f uV\nvoltage (Alumel): %f uV" % (self.time_voltage, self.Vchromelcalc, self.Valumelcalc)
global rawfile
print('write voltages to file')
rawfile.write('%.3f,%.3f,%.3f,%.3f,'%(self.Vchromelraw, self.Vchromelcalc,self.Valumelraw, self.Valumelcalc))
#end def
#--------------------------------------------------------------------------
def getStability(self, temps, times):
coeffs = np.polyfit(times, temps, 1)
# Polynomial Coefficients
results = coeffs.tolist()
return results[0]
#end def
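# Note: 'times' is in seconds, so the returned slope is in C/s; callers multiply
# it by 60 to report stability in C/min (see take_temperature_Data above).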
#--------------------------------------------------------------------------
def check_tolerance(self):
print 'check tolerance'
self.tolA = (np.abs(self.sampletempA-(self.avgtemp+self.dT/2.0)) < self.tolerance)
self.tolB = (np.abs(self.sampletempB-(self.avgtemp-self.dT/2.0)) < self.tolerance)
print 'tolerance A: ',self.tolA
print 'tolerance B:', self.tolB
if (self.tolA and self.tolB):
self.tol = 'OK'
#end if
else:
self.tol = 'NO'
#end else
print 'check stability'
if (self.stabilityA != '-'):
self.stableA = (np.abs(self.stabilityA) < self.stability_threshold)
print 'stable A: ',self.stableA
#end if
else:
self.stableA = False
print 'stable A: ',self.stableA
#end else
if (self.stabilityB != '-'):
self.stableB = (np.abs(self.stabilityB) < self.stability_threshold)
print 'stable B: ',self.stableB
#end if
else:
self.stableB = False
print 'stable B: ',self.stableB
#end else
if (self.stableA and self.stableB):
self.stable = 'OK'
#end if
else:
self.stable = 'NO'
#end else
print "\ntolerance: %s\nstable: %s\n" % (self.tol, self.stable)
global rawfile
print('write status to file')
rawfile.write(str(self.tol)+','+str(self.stable)+'\n')
self.updateGUI(stamp="Status Bar", data=[self.tol, self.stable])
#end def
#--------------------------------------------------------------------------
def data_measurement(self):
global rawfile
print '\nseebeck data measurement'
# Takes and writes to file the data on the Keithley
# The only change between blocks like this one is the specific
# channel on the Keithley that is being measured.
self.sampletempA = float(self.sampleApid.get_pv())
self.time_sampletempA = time.time() - self.start
self.updateGUI(stamp="Time Sample Temp A", data=self.time_sampletempA)
self.updateGUI(stamp="Sample Temp A", data=self.sampletempA)
print "time: %.2f s\t sample temp A: %.2f C" % (self.time_sampletempA, self.sampletempA)
time.sleep(0.2)
self.sampletempB = float(self.sampleBpid.get_pv())
self.time_sampletempB = time.time() - self.start
self.updateGUI(stamp="Time Sample Temp B", data=self.time_sampletempB)
self.updateGUI(stamp="Sample Temp B", data=self.sampletempB)
print "time: %.2f s\ttempB: %.2f C" % (self.time_sampletempB, self.sampletempB)
time.sleep(0.2)
self.Vchromelraw = float(self.k2700.fetch(chromelChannel))*10**6
self.Vchromelcalc = self.voltage_Correction(self.Vchromelraw,self.sampletempA,self.sampletempB, 'chromel')
self.time_Vchromel = time.time() - self.start
self.updateGUI(stamp="Time Chromel Voltage", data=self.time_Vchromel)
self.updateGUI(stamp="Chromel Voltage", data=self.Vchromelcalc)
print "time: %.2f s\t voltage (Chromel) %f uV" % (self.time_Vchromel, self.Vchromelcalc)
time.sleep(0.2)
self.Valumelraw = float(self.k2700.fetch(alumelChannel))*10**6
self.Valumelcalc = self.voltage_Correction(self.Valumelraw,self.sampletempA,self.sampletempB, 'alumel')
self.time_Valumel = time.time() - self.start
self.updateGUI(stamp="Time Alumel Voltage", data=self.time_Valumel)
self.updateGUI(stamp="Alumel Voltage", data=self.Valumelcalc)
print "time: %.2f s\t voltage (Alumel) %f uV" % (self.time_Valumel, self.Valumelcalc)
time.sleep(0.2)
rawfile.write('%.1f,'%(self.time_sampletempA))
rawfile.write('%.2f,%.2f,%.2f,' %(self.sampletempA,self.samplesetpointA,self.blocktempA))
rawfile.write(str(self.stabilityA)+',')
rawfile.write('%.2f,%.2f,%.2f,' %(self.sampletempB,self.samplesetpointB,self.blocktempB))
rawfile.write(str(self.stabilityB)+',')
rawfile.write('%.3f,%.3f,%.3f,%.3f,'%(self.Vchromelraw, self.Vchromelcalc,self.Valumelraw, self.Valumelcalc))
rawfile.write(str(self.tol)+','+str(self.stable)+'\n')
print('Symmetrize the measurement and repeat')
self.Valumelraw2 = float(self.k2700.fetch(alumelChannel))*10**6
self.Valumelcalc2 = self.voltage_Correction(self.Valumelraw2,self.sampletempA,self.sampletempB, 'alumel')
self.time_Valumel2 = time.time() - self.start
self.updateGUI(stamp="Time Alumel Voltage", data=self.time_Valumel2)
self.updateGUI(stamp="Alumel Voltage", data=self.Valumelcalc2)
print "time: %.2f s\t voltage (Alumel) %f uV" % (self.time_Valumel2, self.Valumelcalc2)
time.sleep(0.2)
self.Vchromelraw2 = float(self.k2700.fetch(chromelChannel))*10**6
self.Vchromelcalc2 = self.voltage_Correction(self.Vchromelraw2,self.sampletempA,self.sampletempB, 'chromel')
self.time_Vchromel2 = time.time() - self.start
self.updateGUI(stamp="Time Chromel Voltage", data=self.time_Vchromel2)
self.updateGUI(stamp="Chromel Voltage", data=self.Vchromelcalc2)
print "time: %.2f s\t voltage (Chromel) %f uV" % (self.time_Vchromel2, self.Vchromelcalc2)
time.sleep(0.2)
self.sampletempB2 = float(self.sampleBpid.get_pv())
self.time_sampletempB2 = time.time() - self.start
self.updateGUI(stamp="Time Sample Temp B", data=self.time_sampletempB2)
self.updateGUI(stamp="Sample Temp B", data=self.sampletempB2)
print "time: %.2f s\ttempB: %.2f C" % (self.time_sampletempB2, self.sampletempB2)
time.sleep(0.2)
self.sampletempA2 = float(self.sampleApid.get_pv())
self.time_sampletempA2 = time.time() - self.start
self.updateGUI(stamp="Time Sample Temp A", data=self.time_sampletempA2)
self.updateGUI(stamp="Sample Temp A", data=self.sampletempA2)
print "time: %.2f s\t sample temp A: %.2f C" % (self.time_sampletempA2, self.sampletempA2)
rawfile.write('%.1f,'%(self.time_Valumel2))
rawfile.write('%.2f,%.2f,%.2f,' %(self.sampletempA2,self.samplesetpointA,self.blocktempA))
rawfile.write(str(self.stabilityA)+',')
rawfile.write('%.2f,%.2f,%.2f,' %(self.sampletempB2,self.samplesetpointB,self.blocktempB))
rawfile.write(str(self.stabilityB)+',')
rawfile.write('%.3f,%.3f,%.3f,%.3f,'%(self.Vchromelraw2, self.Vchromelcalc2,self.Valumelraw2, self.Valumelcalc2))
rawfile.write(str(self.tol)+','+str(self.stable)+'\n')
#end def
#--------------------------------------------------------------------------
def voltage_Correction(self, raw_voltage, tempA, tempB, side):
''' raw_data must be in uV '''
# Kelvin conversion for polynomial correction.
dT = tempA - tempB
avgT = (tempA + tempB)/2 + 273.15
# Correction for effect from Thermocouple Seebeck
out = self.alpha(avgT, side)*dT - raw_voltage
return out
#end def
#--------------------------------------------------------------------------
def alpha(self, x, side):
''' x = avgT
alpha in uV/K
'''
if tc_type == "k-type":
### If Chromel, taken from Chromel_Seebeck.txt
if side == 'chromel':
if ( x >= 270 and x < 700):
alpha = -2467.61114613*x**0 + 55.6028987953*x**1 + \
-0.552110359087*x**2 + 0.00320554346691*x**3 + \
-1.20477254034e-05*x**4 + 3.06344710205e-08*x**5 + \
-5.33914758601e-11*x**6 + 6.30044607727e-14*x**7 + \
-4.8197269477e-17*x**8 + 2.15928374212e-20*x**9 + \
-4.30421084091e-24*x**10
#end if
elif ( x >= 700 and x < 1599):
alpha = 1165.13254764*x**0 + -9.49622421414*x**1 + \
0.0346344390853*x**2 + -7.27785048931e-05*x**3 + \
9.73981855547e-08*x**4 + -8.64369652227e-11*x**5 + \
5.10080771762e-14*x**6 + -1.93318725171e-17*x**7 + \
4.27299905603e-21*x**8 + -4.19761748937e-25*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Chromel)
### If Alumel, taken from Alumel_Seebeck.txt
elif side == 'alumel':
if ( x >= 270 and x < 570):
alpha = -3465.28789643*x**0 + 97.4007289124*x**1 + \
-1.17546754681*x**2 + 0.00801252041119*x**3 + \
-3.41263237031e-05*x**4 + 9.4391002358e-08*x**5 + \
-1.69831949233e-10*x**6 + 1.91977765586e-13*x**7 + \
-1.2391854625e-16*x**8 + 3.48576207577e-20*x**9
#end if
elif ( x >= 570 and x < 1599):
alpha = 254.644633774*x**0 + -2.17639940109*x**1 + \
0.00747127856327*x**2 + -1.41920634198e-05*x**3 + \
1.61971537881e-08*x**4 + -1.14428153299e-11*x**5 + \
4.969263632e-15*x**6 + -1.27526741699e-18*x**7 + \
1.80403838088e-22*x**8 + -1.23699936952e-26*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Alumel)
else:
print "Error in voltage correction."
#end if (K-type)
return alpha
#end def
#--------------------------------------------------------------------------
def write_data_to_file(self):
global timecalclist, Vchromelcalclist, Valumelcalclist, dTcalclist, avgTcalclist
global myfile
print('\nWrite data to file\n')
time = (self.time_sampletempA + self.time_sampletempB + self.time_Valumel + self.time_Vchromel + self.time_sampletempA2 + self.time_sampletempB2 + self.time_Valumel2 + self.time_Vchromel2)/8
ta = (self.sampletempA + self.sampletempA2)/2
tb = (self.sampletempB + self.sampletempB2)/2
avgt = (ta + tb)/2
dt = ta-tb
vchromel = (self.Vchromelcalc + self.Vchromelcalc2)/2
valumel = (self.Valumelcalc + self.Valumelcalc2)/2
myfile.write('%.3f,' %(time))
myfile.write('%.4f,%.4f,%.4f,%.4f,' % (ta, tb, avgt, dt) )
myfile.write('%.6f,%.6f' % (vchromel,valumel))
timecalclist.append(time)
Vchromelcalclist.append(vchromel)
Valumelcalclist.append(valumel)
dTcalclist.append(dt)
avgTcalclist.append(avgt)
# indicates whether an oscillation has started or stopped
if self.measurement_indicator == 'start':
myfile.write(',Start Oscillation')
self.measurement_indicator = 'none'
elif self.measurement_indicator == 'stop':
myfile.write(',Stop Oscillation')
self.measurement_indicator = 'none'
elif self.measurement_indicator == 'none':
myfile.write(', ')
else:
myfile.write(', ')
myfile.write('\n')
#end def
#--------------------------------------------------------------------------
def updateGUI(self, stamp, data):
"""
Sends data to the GUI (main thread), for live updating while the process is running
in another thread.
"""
time.sleep(0.1)
wx.CallAfter(pub.sendMessage, stamp, msg=data)
#end def
#--------------------------------------------------------------------------
def process_data(self):
global timecalclist, Vchromelcalclist, Valumelcalclist, dTcalclist, avgTcalclist
global processfile
print '\nprocess data to get seebeck coefficient'
time = np.average(timecalclist)
avgT = np.average(avgTcalclist)
dTchromellist = dTcalclist
dTalumellist = dTcalclist
results_chromel = {}
results_alumel = {}
coeffs_chromel = np.polyfit(dTchromellist, Vchromelcalclist, 1)
coeffs_alumel = np.polyfit(dTalumellist,Valumelcalclist,1)
# Polynomial Coefficients
polynomial_chromel = coeffs_chromel.tolist()
polynomial_alumel = coeffs_alumel.tolist()
seebeck_chromel = polynomial_chromel[0]
offset_chromel = polynomial_chromel[1]
seebeck_alumel = polynomial_alumel[0]
offset_alumel = polynomial_alumel[1]
# Calculate coefficient of determination (r-squared):
p_chromel = np.poly1d(coeffs_chromel)
p_alumel = np.poly1d(coeffs_alumel)
# fitted values:
yhat_chromel = p_chromel(dTchromellist)
yhat_alumel = p_alumel(dTalumellist)
# mean of values:
ybar_chromel = np.sum(Vchromelcalclist)/len(Vchromelcalclist)
ybar_alumel = np.sum(Valumelcalclist)/len(Valumelcalclist)
# regression sum of squares:
ssreg_chromel = np.sum((yhat_chromel-ybar_chromel)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
ssreg_alumel = np.sum((yhat_alumel-ybar_alumel)**2)
# total sum of squares:
sstot_chromel = np.sum((Vchromelcalclist - ybar_chromel)**2)
sstot_alumel = np.sum((Valumelcalclist - ybar_alumel)**2) # or sum([ (yi - ybar)**2 for yi in y])
rsquared_chromel = ssreg_chromel / sstot_chromel
rsquared_alumel = ssreg_alumel / sstot_alumel
processfile.write('%.3f,%.5f,%.5f,%.5f,%.5f,%.5f,%.5f,%.5f\n'%(time,avgT,seebeck_chromel,offset_chromel,rsquared_chromel,seebeck_alumel,offset_alumel,rsquared_alumel))
fitchromel = {}
fitalumel = {}
fitchromel['polynomial'] = polynomial_chromel
fitalumel['polynomial'] = polynomial_alumel
fitchromel['r-squared'] = rsquared_chromel
fitalumel['r-squared'] = rsquared_alumel
celsius = u"\u2103"
self.create_plot(dTalumellist,dTchromellist,Valumelcalclist,Vchromelcalclist,fitalumel,fitchromel,str(self.plotnumber)+'_'+str(avgT)+ 'C')
self.updateGUI(stamp="Chromel Seebeck", data=seebeck_chromel)
self.updateGUI(stamp="Alumel Seebeck", data=seebeck_alumel)
#end def
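# Illustrative sketch of the fit performed above (made-up numbers, not measured data):
#   dT (K):  [-2.0, -1.0, 0.0, 1.0, 2.0]
#   V (uV):  [-40.1, -20.0, 0.2, 19.9, 40.0]
#   np.polyfit(dT, V, 1) -> slope ~ 20.0 uV/K (reported as the seebeck value),
#   intercept ~ 0.0 uV (reported as the offset)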
#--------------------------------------------------------------------------
def create_plot(self, xalumel, xchromel, yalumel, ychromel, fitalumel, fitchromel, title):
global filePath
print 'create seebeck plot'
dpi = 400
plt.ioff()
# Create Plot:
fig = plt.figure(self.plotnumber, dpi=dpi)
ax = fig.add_subplot(111)
ax.grid()
ax.set_title(title)
ax.set_xlabel("dT (K)")
ax.set_ylabel("dV (uV)")
# Plot data points:
ax.scatter(xalumel, yalumel, color='r', marker='.', label="alumel Voltage")
ax.scatter(xchromel, ychromel, color='b', marker='.', label="chromel Voltage")
# Overlay linear fits:
coeffsalumel = fitalumel['polynomial']
coeffschromel = fitchromel['polynomial']
p_alumel = np.poly1d(coeffsalumel)
p_chromel = np.poly1d(coeffschromel)
xp = np.linspace(min(xalumel+xchromel), max(xalumel+xchromel), 5000)
alumel_eq = 'dV = %.2f*(dT) + %.2f' % (coeffsalumel[0], coeffsalumel[1])
chromel_eq = 'dV = %.2f*(dT) + %.2f' % (coeffschromel[0], coeffschromel[1])
ax.plot(xp, p_alumel(xp), '-', c='#FF9900', label="alumel Voltage Fit\n %s" % alumel_eq)
ax.plot(xp, p_chromel(xp), '-', c='g', label="chromel Voltage Fit\n %s" % chromel_eq)
ax.legend(loc='upper left', fontsize='10')
# Save:
plot_folder = filePath + '/Seebeck Plots/'
if not os.path.exists(plot_folder):
os.makedirs(plot_folder)
fig.savefig('%s.png' % (plot_folder + title) , dpi=dpi)
#plt.close()
#end def
#--------------------------------------------------------------------------
def save_files(self):
''' Function saving the files after the data acquisition loop has been
exited.
'''
print('Save Files')
global myfile
global rawfile
global processfile
myfile.close() # Close the file
rawfile.close()
processfile.close()
# Save the GUI plots
global save_plots_ID
save_plots_ID = 1
self.updateGUI(stamp='Save_All', data='Save')
#end def
#end class
###############################################################################
###############################################################################
class BoundControlBox(wx.Panel):
""" A static box with a couple of radio buttons and a text
box. Allows switching between an automatic mode and a
manual mode with an associated value.
"""
#--------------------------------------------------------------------------
def __init__(self, parent, ID, label, initval):
wx.Panel.__init__(self, parent, ID)
self.value = initval
box = wx.StaticBox(self, -1, label)
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
self.radio_auto = wx.RadioButton(self, -1, label="Auto", style=wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1, label="Manual")
self.manual_text = wx.TextCtrl(self, -1,
size=(30,-1),
value=str(initval),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_manual_text, self.manual_text)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.manual_text)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
#end init
#--------------------------------------------------------------------------
def on_update_manual_text(self, event):
self.manual_text.Enable(self.radio_manual.GetValue())
#end def
#--------------------------------------------------------------------------
def on_text_enter(self, event):
self.value = self.manual_text.GetValue()
#end def
#--------------------------------------------------------------------------
def is_auto(self):
return self.radio_auto.GetValue()
#end def
#--------------------------------------------------------------------------
def manual_value(self):
return self.value
#end def
#end class
###############################################################################
###############################################################################
class UserPanel(wx.Panel):
''' User Input Panel '''
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
global tolerance
global oscillation
global stability_threshold
self.oscillation = oscillation
self.tolerance = tolerance
self.stability_threshold = stability_threshold*60
self.create_title("User Panel") # Title
self.celsius = u"\u2103"
self.font2 = wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
self.oscillation_control() # Oscillation range control
self.tolerance_control() # PID tolerance level Control
self.stability_control() # PID stability threshold control
self.measurementListBox()
self.maxLimit_label()
self.linebreak1 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak2 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak3 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak4 = wx.StaticLine(self, pos=(-1,-1), size=(600,1), style=wx.LI_HORIZONTAL)
self.run_stop() # Run and Stop buttons
self.create_sizer() # Set Sizer for panel
pub.subscribe(self.enable_buttons, "Enable Buttons")
#end init
#--------------------------------------------------------------------------
def create_title(self, name):
self.titlePanel = wx.Panel(self, -1)
title = wx.StaticText(self.titlePanel, label=name)
font_title = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
title.SetFont(font_title)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(title, 0, wx.LEFT, 5)
self.titlePanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def run_stop(self):
self.run_stopPanel = wx.Panel(self, -1)
rs_sizer = wx.GridBagSizer(3, 3)
self.btn_check = btn_check = wx.Button(self.run_stopPanel, label='check', style=0, size=(60,30)) # Initial Status Button
btn_check.SetBackgroundColour((0,0,255))
caption_check = wx.StaticText(self.run_stopPanel, label='*check initial status')
self.btn_run = btn_run = wx.Button(self.run_stopPanel, label='run', style=0, size=(60,30)) # Run Button
btn_run.SetBackgroundColour((0,255,0))
caption_run = wx.StaticText(self.run_stopPanel, label='*run measurement')
self.btn_stop = btn_stop = wx.Button(self.run_stopPanel, label='stop', style=0, size=(60,30)) # Stop Button
btn_stop.SetBackgroundColour((255,0,0))
caption_stop = wx.StaticText(self.run_stopPanel, label = '*quit operation')
btn_check.Bind(wx.EVT_BUTTON, self.check)
btn_run.Bind(wx.EVT_BUTTON, self.run)
btn_stop.Bind(wx.EVT_BUTTON, self.stop)
controlPanel = wx.StaticText(self.run_stopPanel, label='Control Panel')
controlPanel.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD))
rs_sizer.Add(controlPanel,(0,0), span=(1,3),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(btn_check,(1,0),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(caption_check,(2,0),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(btn_run,(1,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(caption_run,(2,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(btn_stop,(1,2),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(caption_stop,(2,2),flag=wx.ALIGN_CENTER_HORIZONTAL)
self.run_stopPanel.SetSizer(rs_sizer)
btn_stop.Disable()
# end def
#--------------------------------------------------------------------------
def check(self, event):
InitialCheck()
#end def
#--------------------------------------------------------------------------
def run(self, event):
global dataFile
global statusFile
global seebeckFile
global myfile
global rawfile
global processfile
global measureList
global dTlist
global abort_ID
measureList = [None]*self.listbox.GetCount()
for k in xrange(self.listbox.GetCount()):
measureList[k] = int(self.listbox.GetString(k))
#end for
if (len(measureList) > 0 and len(dTlist) > 0 ):
try:
self.name_folder()
if self.run_check == wx.ID_OK:
myfile = open(dataFile, 'w') # opens file for writing/overwriting
rawfile = open(statusFile,'w')
processfile = open(seebeckFile,'w')
begin = datetime.now() # Current date and time
myfile.write('Seebeck Data File\nStart Time: ' + str(begin) + '\n')
rawfile.write('System Status\nStart Time: ' + str(begin) + '\n')
processfile.write('Processed Seebeck Coefficient\nStart Time: ' + str(begin) + '\n')
dataheaders = 'time (s), tempA (C), tempB (C), avgtemp (C), deltatemp (C), Vchromel (uV), Valumel (uV), indicator\n'
myfile.write(dataheaders)
rawheaders1 = 'time (s), sampletempA (C), samplesetpointA (C), blocktempA (C), stabilityA (C/min), sampletempB (C), samplesetpointB (C), blocktempB (C), stabilityB (C/min),'
rawheaders2 = 'chromelvoltageraw (uV), chromelvoltagecalc (uV), alumelvoltageraw (uV), alumelvoltagecalc (uV), tolerance, stability\n'
rawfile.write(rawheaders1 + rawheaders2)
processheaders = 'time(s),temperature (C),seebeck_chromel (uV/K),offset_chromel (uV),R^2_chromel,seebeck_alumel (uV/K),offset_alumel (uV),R^2_alumel\n'
processfile.write(processheaders)
abort_ID = 0
self.btn_osc.Disable()
self.btn_tol.Disable()
self.btn_stab.Disable()
self.btn_new.Disable()
self.btn_ren.Disable()
self.btn_dlt.Disable()
self.btn_clr.Disable()
self.btn_check.Disable()
self.btn_run.Disable()
self.btn_stop.Enable()
#start the threading process
thread = ProcessThread()
#end if
#end try
except visa.VisaIOError:
wx.MessageBox("Not all instruments are connected!", "Error")
#end except
#end if
#end def
#--------------------------------------------------------------------------
def name_folder(self):
question = wx.MessageDialog(None, 'The data files are saved into a folder upon ' + \
'completion. \nBy default, the folder will be named with a time stamp.\n\n' + \
'Would you like to name your own folder?', 'Question',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
answer = question.ShowModal()
if answer == wx.ID_YES:
self.folder_name = wx.GetTextFromUser('Enter the name of your folder.\n' + \
'Only type in a name, NOT a file path.')
if self.folder_name == "":
wx.MessageBox("Canceled")
else:
self.choose_dir()
#end if
else:
date = str(datetime.now())
self.folder_name = 'Seebeck Data %s.%s.%s' % (date[0:13], date[14:16], date[17:19])
self.choose_dir()
#end else
#end def
#--------------------------------------------------------------------------
def choose_dir(self):
found = False
dlg = wx.DirDialog (None, "Choose the directory to save your files.", "",
wx.DD_DEFAULT_STYLE)
self.run_check = dlg.ShowModal()
if self.run_check == wx.ID_OK:
global filePath
filePath = dlg.GetPath()
filePath = filePath + '/' + self.folder_name
if not os.path.exists(filePath):
os.makedirs(filePath)
os.chdir(filePath)
else:
n = 1
while found == False:
path = filePath + ' - ' + str(n)
if os.path.exists(path):
n = n + 1
else:
os.makedirs(path)
os.chdir(path)
n = 1
found = True
#end while
#end else
#end if
# Set the global path to the newly created path, if applicable.
if found == True:
filePath = path
#end if
#end def
#--------------------------------------------------------------------------
def stop(self, event):
global abort_ID
abort_ID = 1
self.enable_buttons
#end def
#--------------------------------------------------------------------------
def oscillation_control(self):
self.oscPanel = wx.Panel(self, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.label_osc = wx.StaticText(self, label="PID Oscillaton (%s):"% self.celsius)
self.text_osc = text_osc = wx.StaticText(self.oscPanel, label=str(self.oscillation) + ' '+self.celsius)
text_osc.SetFont(self.font2)
self.edit_osc = edit_osc = wx.TextCtrl(self.oscPanel, size=(40, -1))
self.btn_osc = btn_osc = wx.Button(self.oscPanel, label="save", size=(40, -1))
text_guide_osc = wx.StaticText(self.oscPanel, label="The PID set points will oscillate \nwithin this temperature range \nat each measurement.")
btn_osc.Bind(wx.EVT_BUTTON, self.save_oscillation)
hbox.Add((0, -1))
hbox.Add(text_osc, 0, wx.LEFT, 5)
hbox.Add(edit_osc, 0, wx.LEFT, 40)
hbox.Add(btn_osc, 0, wx.LEFT, 5)
hbox.Add(text_guide_osc, 0, wx.LEFT, 5)
self.oscPanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def save_oscillation(self, e):
global oscillation
global dTlist
try:
self.oscillation = self.edit_osc.GetValue()
if float(self.oscillation) > maxLimit:
self.oscillation = str(maxLimit)
self.text_osc.SetLabel(self.oscillation)
oscillation = float(self.oscillation)
#dTlist = [oscillation*i/4 for i in range(0,-5,-1)+range(-3,5)+range(3,-1,-1)]
dTlist = [oscillation*i/2 for i in range(0,-3,-1)+range(-1,3)+range(1,-1,-1)]
except ValueError:
wx.MessageBox("Invalid input. Must be a number.", "Error")
#end def
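# For example, with oscillation = 8 the list above evaluates to
# [0.0, -4.0, -8.0, -4.0, 0.0, 4.0, 8.0, 4.0, 0.0]: a sweep from zero down to
# -oscillation, up to +oscillation, and back to zero in half-oscillation steps.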
#--------------------------------------------------------------------------
def tolerance_control(self):
self.tolPanel = wx.Panel(self, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.label_tol = wx.StaticText(self, label="Tolerance ("+self.celsius+")")
self.text_tol = text_tol = wx.StaticText(self.tolPanel, label=str(self.tolerance) + ' '+self.celsius)
text_tol.SetFont(self.font2)
self.edit_tol = edit_tol = wx.TextCtrl(self.tolPanel, size=(40, -1))
self.btn_tol = btn_tol = wx.Button(self.tolPanel, label="save", size=(40, -1))
text_guide_tol = wx.StaticText(self.tolPanel, label="The tolerance within the\nPID set points necessary\nto start a measurement")
btn_tol.Bind(wx.EVT_BUTTON, self.save_tolerance)
hbox.Add((0, -1))
hbox.Add(text_tol, 0, wx.LEFT, 5)
hbox.Add(edit_tol, 0, wx.LEFT, 40)
hbox.Add(btn_tol, 0, wx.LEFT, 5)
hbox.Add(text_guide_tol, 0, wx.LEFT, 5)
self.tolPanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def save_tolerance(self, e):
global tolerance
global oscillation
try:
self.tolerance = self.edit_tol.GetValue()
if float(self.tolerance) > oscillation:
self.tolerance = str(oscillation-1)
self.text_tol.SetLabel(self.tolerance)
tolerance = float(self.tolerance)
except ValueError:
wx.MessageBox("Invalid input. Must be a number.", "Error")
#end def
#--------------------------------------------------------------------------
def stability_control(self):
self.stability_threshold_Panel = wx.Panel(self, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.label_stability_threshold = wx.StaticText(self, label="Stability Threshold ("+self.celsius+"/min)")
self.text_stability_threshold = text_stability_threshold = wx.StaticText(self.stability_threshold_Panel, label=str(self.stability_threshold) + ' '+self.celsius+'/min')
text_stability_threshold.SetFont(self.font2)
self.edit_stability_threshold = edit_stability_threshold = wx.TextCtrl(self.stability_threshold_Panel, size=(40, -1))
self.btn_stab = btn_stab = wx.Button(self.stability_threshold_Panel, label="save", size=(40, -1))
text_guide_stability_threshold = wx.StaticText(self.stability_threshold_Panel, label='The change in the PID temperature\nmust be below this threshold before\na measurement will begin.')
btn_stab.Bind(wx.EVT_BUTTON, self.save_stability_threshold)
hbox.Add((0, -1))
hbox.Add(text_stability_threshold, 0, wx.LEFT, 5)
hbox.Add(edit_stability_threshold, 0, wx.LEFT, 40)
hbox.Add(btn_stab, 0, wx.LEFT, 5)
hbox.Add(text_guide_stability_threshold, 0, wx.LEFT, 5)
self.stability_threshold_Panel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def save_stability_threshold(self, e):
global stability_threshold
try:
self.stability_threshold = self.edit_stability_threshold.GetValue()
self.text_stability_threshold.SetLabel(self.stability_threshold)
stability_threshold = float(self.stability_threshold)/60
except ValueError:
wx.MessageBox("Invalid input. Must be a number.", "Error")
#end def
#--------------------------------------------------------------------------
def measurementListBox(self):
# ids for measurement List Box
ID_NEW = 1
ID_CHANGE = 2
ID_CLEAR = 3
ID_DELETE = 4
self.measurementPanel = wx.Panel(self, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.label_measurements = wx.StaticText(self,
label="Measurements (%s):"
% self.celsius
)
self.label_measurements.SetFont(self.font2)
self.listbox = wx.ListBox(self.measurementPanel, size=(75,150))
btnPanel = wx.Panel(self.measurementPanel, -1)
vbox = wx.BoxSizer(wx.VERTICAL)
self.btn_new = new = wx.Button(btnPanel, ID_NEW, 'New', size=(50, 20))
self.btn_ren = ren = wx.Button(btnPanel, ID_CHANGE, 'Change', size=(50, 20))
self.btn_dlt = dlt = wx.Button(btnPanel, ID_DELETE, 'Delete', size=(50, 20))
self.btn_clr = clr = wx.Button(btnPanel, ID_CLEAR, 'Clear', size=(50, 20))
self.Bind(wx.EVT_BUTTON, self.NewItem, id=ID_NEW)
self.Bind(wx.EVT_BUTTON, self.OnRename, id=ID_CHANGE)
self.Bind(wx.EVT_BUTTON, self.OnDelete, id=ID_DELETE)
self.Bind(wx.EVT_BUTTON, self.OnClear, id=ID_CLEAR)
self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnRename)
vbox.Add((-1, 5))
vbox.Add(new)
vbox.Add(ren, 0, wx.TOP, 5)
vbox.Add(dlt, 0, wx.TOP, 5)
vbox.Add(clr, 0, wx.TOP, 5)
btnPanel.SetSizer(vbox)
#hbox.Add(self.label_measurements, 0, wx.LEFT, 5)
hbox.Add(self.listbox, 1, wx.ALL, 5)
hbox.Add(btnPanel, 0, wx.RIGHT, 5)
self.measurementPanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def NewItem(self, event):
text = wx.GetTextFromUser('Enter a new measurement', 'Insert dialog')
if text != '':
self.listbox.Append(text)
time.sleep(0.2)
self.listbox_max_limit(maxLimit)
#end def
#--------------------------------------------------------------------------
def OnRename(self, event):
sel = self.listbox.GetSelection()
text = self.listbox.GetString(sel)
renamed = wx.GetTextFromUser('Rename item', 'Rename dialog', text)
if renamed != '':
self.listbox.Delete(sel)
self.listbox.Insert(renamed, sel)
self.listbox_max_limit(maxLimit)
#end def
#--------------------------------------------------------------------------
def OnDelete(self, event):
sel = self.listbox.GetSelection()
if sel != -1:
self.listbox.Delete(sel)
self.listbox_max_limit(maxLimit)
#end def
#--------------------------------------------------------------------------
def OnClear(self, event):
self.listbox.Clear()
self.listbox_max_limit(maxLimit)
#end def
#--------------------------------------------------------------------------
def listbox_max_limit(self, limit):
""" Sets user input to only alalumel a maximum temperature. """
mlist = [None]*self.listbox.GetCount()
for i in xrange(self.listbox.GetCount()):
mlist[i] = int(self.listbox.GetString(i))
if mlist[i] > limit:
self.listbox.Delete(i)
self.listbox.Insert(str(limit), i)
#end def
#--------------------------------------------------------------------------
def maxLimit_label(self):
self.maxLimit_Panel = wx.Panel(self, -1)
maxLimit_label = wx.StaticText(self.maxLimit_Panel, label='Max Limit Temp:')
maxLimit_text = wx.StaticText(self.maxLimit_Panel, label='%s %s' % (str(maxLimit), self.celsius))
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(maxLimit_label, 0, wx.LEFT, 5)
hbox.Add(maxLimit_text, 0, wx.LEFT, 5)
self.maxLimit_Panel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def create_sizer(self):
sizer = wx.GridBagSizer(8,2)
sizer.Add(self.titlePanel, (0, 1), span=(1,2), flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_osc, (1, 1))
sizer.Add(self.oscPanel, (1, 2))
sizer.Add(self.label_tol, (2,1))
sizer.Add(self.tolPanel, (2, 2))
sizer.Add(self.label_stability_threshold, (3,1))
sizer.Add(self.stability_threshold_Panel, (3, 2))
sizer.Add(self.label_measurements, (4,1))
sizer.Add(self.measurementPanel, (4, 2))
sizer.Add(self.maxLimit_Panel, (5, 1), span=(1,2))
sizer.Add(self.linebreak4, (6,1),span = (1,2))
sizer.Add(self.run_stopPanel, (7,1),span = (1,2), flag=wx.ALIGN_CENTER_HORIZONTAL)
self.SetSizer(sizer)
#end def
#--------------------------------------------------------------------------
def enable_buttons(self):
self.btn_check.Enable()
self.btn_run.Enable()
self.btn_osc.Enable()
self.btn_tol.Enable()
self.btn_stab.Enable()
self.btn_ren.Enable()
self.btn_dlt.Enable()
self.btn_clr.Enable()
self.btn_stop.Disable()
#end def
#end class
###############################################################################
###############################################################################
class StatusPanel(wx.Panel):
"""
Current Status of Measurements
"""
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
self.celsius = u"\u2103"
self.delta = u"\u0394"
self.mu = u"\u00b5"
self.ctime = str(datetime.now())[11:19]
self.t='0:00:00'
self.chromelV=str(0)
self.alumelV = str(0)
self.sampletempA=str(30)
self.sampletempB=str(30)
self.blocktempA=str(30)
self.blocktempB=str(30)
self.samplesetpointA=str(30)
self.samplesetpointB=str(30)
self.stabilityA = '-'
self.stabilityB = '-'
self.dT = str(float(self.sampletempA)-float(self.sampletempB))
self.avgT = str((float(self.sampletempA)+float(self.sampletempB))/2)
self.seebeckchromel = '-'
self.seebeckalumel = '-'
self.mea = '-'
self.create_title("Status Panel")
self.create_status()
self.linebreak1 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak2 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak3 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak4 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak5 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak6 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak7 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak8 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
# Updates from running program
pub.subscribe(self.OnTime, "Time Chromel Voltage")
pub.subscribe(self.OnTime, "Time Alumel Voltage")
pub.subscribe(self.OnTime, "Time Sample Temp A")
pub.subscribe(self.OnTime, "Time Sample Temp B")
pub.subscribe(self.OnChromelVoltage, "Chromel Voltage")
pub.subscribe(self.OnAlumelVoltage, "Alumel Voltage")
pub.subscribe(self.OnSampleTempA, "Sample Temp A")
pub.subscribe(self.OnSampleTempB, "Sample Temp B")
pub.subscribe(self.OnBlockTempA, "Block Temp A")
pub.subscribe(self.OnBlockTempB, "Block Temp B")
pub.subscribe(self.OnSetpointA, "Setpoint A")
pub.subscribe(self.OnSetpointB, "Setpoint B")
pub.subscribe(self.OnStabilityA, "Stability A")
pub.subscribe(self.OnStabilityB, "Stability B")
pub.subscribe(self.OnMeasurement, 'Measurement')
pub.subscribe(self.OnSeebeckchromel, "Chromel Seebeck")
pub.subscribe(self.OnSeebeckalumel, "Alumel Seebeck")
# Updates from initial check
pub.subscribe(self.OnChromelVoltage, "Chromel Voltage Init")
pub.subscribe(self.OnAlumelVoltage, "Alumel Voltage Init")
pub.subscribe(self.OnSampleTempA, "Sample Temp A Init")
pub.subscribe(self.OnSampleTempB, "Sample Temp B Init")
pub.subscribe(self.OnBlockTempA, "Block Temp A Init")
pub.subscribe(self.OnBlockTempB, "Block Temp B Init")
pub.subscribe(self.OnSetpointA, "Setpoint A Init")
pub.subscribe(self.OnSetpointB, "Setpoint B Init")
#self.update_values()
self.create_sizer()
#end init
#--------------------------------------------------------------------------
def OnChromelVoltage(self, msg):
self.chromelV = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnAlumelVoltage(self, msg):
self.alumelV = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnSampleTempA(self, msg):
self.sampletempA = '%.1f'%(float(msg))
self.dT = str(float(self.sampletempA)-float(self.sampletempB))
self.avgT = str((float(self.sampletempA)+float(self.sampletempB))/2)
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnSampleTempB(self, msg):
self.sampletempB = '%.1f'%(float(msg))
self.dT = str(float(self.sampletempA)-float(self.sampletempB))
self.avgT = str((float(self.sampletempA)+float(self.sampletempB))/2)
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnBlockTempA(self, msg):
self.blocktempA = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnBlockTempB(self, msg):
self.blocktempB = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnSetpointA(self, msg):
self.samplesetpointA = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnSetpointB(self, msg):
self.samplesetpointB = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnStabilityA(self, msg):
if msg != '-':
self.stabilityA = '%.2f'%(float(msg))
else:
self.stabilityA = msg
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnStabilityB(self, msg):
if msg != '-':
self.stabilityB = '%.2f'%(float(msg))
else:
self.stabilityB = msg
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnSeebeckchromel(self, msg):
self.seebeckchromel = '%.2f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnSeebeckalumel(self, msg):
self.seebeckalumel = '%.2f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnMeasurement(self, msg):
self.mea = msg
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnTime(self, msg):
time = int(float(msg))
hours = str(time/3600)
minutes = int(time%3600/60)
if (minutes < 10):
minutes = '0%i'%(minutes)
else:
minutes = '%i'%(minutes)
seconds = int(time%60)
if (seconds < 10):
seconds = '0%i'%(seconds)
else:
seconds = '%i'%(seconds)
self.t = '%s:%s:%s'%(hours,minutes,seconds)
self.ctime = str(datetime.now())[11:19]
self.update_values()
#end def
#--------------------------------------------------------------------------
def create_title(self, name):
self.titlePanel = wx.Panel(self, -1)
title = wx.StaticText(self.titlePanel, label=name)
font_title = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
title.SetFont(font_title)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(title, 0, wx.LEFT, 5)
self.titlePanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def create_status(self):
self.label_ctime = wx.StaticText(self, label="current time:")
self.label_ctime.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_t = wx.StaticText(self, label="run time (s):")
self.label_t.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_chromelV = wx.StaticText(self, label="voltage (chromel) ("+self.mu+"V):")
self.label_chromelV.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_alumelV = wx.StaticText(self, label="voltage (alumel) ("+self.mu+"V):")
self.label_alumelV.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_sampletempA = wx.StaticText(self, label="sample temp A ("+self.celsius+"):")
self.label_sampletempA.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_sampletempB = wx.StaticText(self, label="sample temp B ("+self.celsius+"):")
self.label_sampletempB.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_blocktempA = wx.StaticText(self, label="block temp A ("+self.celsius+"):")
self.label_blocktempA.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_blocktempB = wx.StaticText(self, label="block temp B ("+self.celsius+"):")
self.label_blocktempB.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_samplesetpointA = wx.StaticText(self, label="sample setpoint A ("+self.celsius+"):")
self.label_samplesetpointA.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_samplesetpointB = wx.StaticText(self, label="sample setpoint B ("+self.celsius+"):")
self.label_samplesetpointB.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_stabilityA = wx.StaticText(self, label="sample stability A ("+self.celsius+ "/min):")
self.label_stabilityA.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_stabilityB = wx.StaticText(self, label="sample stability B ("+self.celsius+ "/min):")
self.label_stabilityB.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_avgT = wx.StaticText(self, label="avg T ("+self.celsius+"):")
self.label_avgT.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_dT = wx.StaticText(self, label=self.delta+"T ("+self.celsius+"):")
self.label_dT.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_seebeckchromel = wx.StaticText(self, label="seebeck (chromel) ("+self.mu+"V/"+self.celsius+"):")
self.label_seebeckchromel.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_seebeckalumel = wx.StaticText(self, label="seebeck (alumel) ("+self.mu+"V/"+self.celsius+"):")
self.label_seebeckalumel.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_mea = wx.StaticText(self, label="seebeck measurement")
self.label_mea.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.ctimecurrent = wx.StaticText(self, label=self.ctime)
self.ctimecurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.tcurrent = wx.StaticText(self, label=self.t)
self.tcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.chromelVcurrent = wx.StaticText(self, label=self.chromelV)
self.chromelVcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.alumelVcurrent = wx.StaticText(self, label=self.alumelV)
self.alumelVcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.sampletempAcurrent = wx.StaticText(self, label=self.sampletempA)
self.sampletempAcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.sampletempBcurrent = wx.StaticText(self, label=self.sampletempB)
self.sampletempBcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.blocktempAcurrent = wx.StaticText(self, label=self.blocktempA)
self.blocktempAcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.blocktempBcurrent = wx.StaticText(self, label=self.blocktempB)
self.blocktempBcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.samplesetpointAcurrent = wx.StaticText(self, label=self.samplesetpointA)
self.samplesetpointAcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.samplesetpointBcurrent = wx.StaticText(self, label=self.samplesetpointB)
self.samplesetpointBcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.stabilityAcurrent = wx.StaticText(self, label=self.stabilityA)
self.stabilityAcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.stabilityBcurrent = wx.StaticText(self, label=self.stabilityB)
self.stabilityBcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.avgTcurrent = wx.StaticText(self, label=self.avgT)
self.avgTcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.dTcurrent = wx.StaticText(self, label=self.dT)
self.dTcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.seebeckchromelcurrent = wx.StaticText(self, label=self.seebeckchromel)
self.seebeckchromelcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.seebeckalumelcurrent = wx.StaticText(self, label=self.seebeckalumel)
self.seebeckalumelcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.meacurrent = wx.StaticText(self, label=self.mea)
self.meacurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
#end def
#--------------------------------------------------------------------------
def update_values(self):
self.ctimecurrent.SetLabel(self.ctime)
self.tcurrent.SetLabel(self.t)
self.chromelVcurrent.SetLabel(self.chromelV)
self.alumelVcurrent.SetLabel(self.alumelV)
self.sampletempAcurrent.SetLabel(self.sampletempA)
self.sampletempBcurrent.SetLabel(self.sampletempB)
self.blocktempAcurrent.SetLabel(self.blocktempA)
self.blocktempBcurrent.SetLabel(self.blocktempB)
self.samplesetpointAcurrent.SetLabel(self.samplesetpointA)
self.samplesetpointBcurrent.SetLabel(self.samplesetpointB)
self.stabilityAcurrent.SetLabel(self.stabilityA)
self.stabilityBcurrent.SetLabel(self.stabilityB)
self.avgTcurrent.SetLabel(self.avgT)
self.dTcurrent.SetLabel(self.dT)
self.seebeckchromelcurrent.SetLabel(self.seebeckchromel)
self.seebeckalumelcurrent.SetLabel(self.seebeckalumel)
self.meacurrent.SetLabel(self.mea)
#end def
#--------------------------------------------------------------------------
def create_sizer(self):
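        # Arrange the caption/value pairs created above into a two-column grid (captions in column 0, current values in column 1).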
sizer = wx.GridBagSizer(20,2)
sizer.Add(self.titlePanel, (0, 0), span = (1,2), border=5, flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.linebreak1,(1,0), span = (1,2))
sizer.Add(self.label_ctime, (2,0))
sizer.Add(self.ctimecurrent, (2, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_t, (3,0))
sizer.Add(self.tcurrent, (3, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
#sizer.Add(self.linebreak2,(4,0), span = (1,2))
sizer.Add(self.label_chromelV, (4, 0))
sizer.Add(self.chromelVcurrent, (4, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_alumelV, (5,0))
sizer.Add(self.alumelVcurrent, (5,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
#sizer.Add(self.linebreak3,(7,0), span = (1,2))
sizer.Add(self.label_sampletempA, (6,0))
sizer.Add(self.sampletempAcurrent, (6,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_samplesetpointA, (7,0))
sizer.Add(self.samplesetpointAcurrent, (7,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_stabilityA, (8,0))
sizer.Add(self.stabilityAcurrent, (8, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_blocktempA, (9,0))
sizer.Add(self.blocktempAcurrent, (9,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
#sizer.Add(self.linebreak4,(12,0), span = (1,2))
sizer.Add(self.label_sampletempB, (10,0))
sizer.Add(self.sampletempBcurrent, (10,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_samplesetpointB, (11,0))
sizer.Add(self.samplesetpointBcurrent, (11,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_stabilityB, (12,0))
sizer.Add(self.stabilityBcurrent, (12, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_blocktempB, (13,0))
sizer.Add(self.blocktempBcurrent, (13,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
#sizer.Add(self.linebreak5,(17,0), span = (1,2))
sizer.Add(self.label_avgT, (14,0))
sizer.Add(self.avgTcurrent, (14,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_dT, (15,0))
sizer.Add(self.dTcurrent, (15,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
#sizer.Add(self.linebreak6,(20,0), span = (1,2))
sizer.Add(self.label_seebeckchromel, (16,0))
sizer.Add(self.seebeckchromelcurrent, (16,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_seebeckalumel, (17,0))
sizer.Add(self.seebeckalumelcurrent, (17,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
#sizer.Add(self.linebreak7,(23,0), span = (1,2))
sizer.Add(self.label_mea, (18,0))
sizer.Add(self.meacurrent, (18,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.linebreak2, (19,0), span = (1,2))
self.SetSizer(sizer)
#end def
#end class
###############################################################################
###############################################################################
class VoltagePanel(wx.Panel):
"""
GUI Window for plotting voltage data.
"""
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
global filePath
global tchromelV_list
global chromelV_list
global talumelV_list
global alumelV_list
self.create_title("Voltage Panel")
self.init_plot()
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.create_control_panel()
self.create_sizer()
pub.subscribe(self.OnChromelVoltage, "Chromel Voltage")
pub.subscribe(self.OnchromelVTime, "Time Chromel Voltage")
pub.subscribe(self.OnAlumelVoltage, "Alumel Voltage")
pub.subscribe(self.OnalumelVTime, "Time Alumel Voltage")
# For saving the plots at the end of data acquisition:
pub.subscribe(self.save_plot, "Save_All")
self.animator = animation.FuncAnimation(self.figure, self.draw_plot, interval=500, blit=False)
#end init
#--------------------------------------------------------------------------
def create_title(self, name):
self.titlePanel = wx.Panel(self, -1)
title = wx.StaticText(self.titlePanel, label=name)
font_title = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
title.SetFont(font_title)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(title, 0, wx.LEFT, 5)
self.titlePanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def create_control_panel(self):
self.xmin_control = BoundControlBox(self, -1, "t min", 0)
self.xmax_control = BoundControlBox(self, -1, "t max", 100)
self.ymin_control = BoundControlBox(self, -1, "V min", -1000)
self.ymax_control = BoundControlBox(self, -1, "V max", 1000)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox1.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox1.Add(self.ymax_control, border=5, flag=wx.ALL)
#end def
#--------------------------------------------------------------------------
def OnChromelVoltage(self, msg):
self.chromelV = float(msg)
chromelV_list.append(self.chromelV)
tchromelV_list.append(self.tchromelV)
#end def
#--------------------------------------------------------------------------
def OnchromelVTime(self, msg):
self.tchromelV = float(msg)
#end def
#--------------------------------------------------------------------------
def OnAlumelVoltage(self, msg):
self.alumelV = float(msg)
alumelV_list.append(self.alumelV)
talumelV_list.append(self.talumelV)
#end def
#--------------------------------------------------------------------------
def OnalumelVTime(self, msg):
self.talumelV = float(msg)
#end def
#--------------------------------------------------------------------------
def init_plot(self):
self.dpi = 100
self.colorH = 'g'
self.colorL = 'y'
self.figure = Figure((6,2), dpi=self.dpi)
self.subplot = self.figure.add_subplot(111)
self.lineH, = self.subplot.plot(tchromelV_list,chromelV_list, color=self.colorH, linewidth=1)
self.lineL, = self.subplot.plot(talumelV_list,alumelV_list, color=self.colorL, linewidth=1)
self.legend = self.figure.legend( (self.lineH, self.lineL), (r"$V_{chromel}$",r"$V_{alumel}$"), (0.15,0.7),fontsize=8)
#self.subplot.text(0.05, .95, r'$X(f) = \mathcal{F}\{x(t)\}$', \
#verticalalignment='top', transform = self.subplot.transAxes)
#end def
#--------------------------------------------------------------------------
def draw_plot(self,i):
self.subplot.clear()
#self.subplot.set_title("voltage vs. time", fontsize=12)
self.subplot.set_ylabel(r"voltage ($\mu V$)", fontsize = 8)
self.subplot.set_xlabel("time (s)", fontsize = 8)
# Adjustable scale:
if self.xmax_control.is_auto():
xmax = max(tchromelV_list+talumelV_list)
else:
xmax = float(self.xmax_control.manual_value())
if self.xmin_control.is_auto():
xmin = 0
else:
xmin = float(self.xmin_control.manual_value())
if self.ymin_control.is_auto():
minV = min(chromelV_list+alumelV_list)
ymin = minV - abs(minV)*0.3
else:
ymin = float(self.ymin_control.manual_value())
if self.ymax_control.is_auto():
maxV = max(chromelV_list+alumelV_list)
ymax = maxV + abs(maxV)*0.3
else:
ymax = float(self.ymax_control.manual_value())
self.subplot.set_xlim([xmin, xmax])
self.subplot.set_ylim([ymin, ymax])
pylab.setp(self.subplot.get_xticklabels(), fontsize=8)
pylab.setp(self.subplot.get_yticklabels(), fontsize=8)
self.lineH, = self.subplot.plot(tchromelV_list,chromelV_list, color=self.colorH, linewidth=1)
self.lineL, = self.subplot.plot(talumelV_list,alumelV_list, color=self.colorL, linewidth=1)
return (self.lineH, self.lineL)
#return (self.subplot.plot( tchromelV_list, chromelV_list, color=self.colorH, linewidth=1),
#self.subplot.plot( talumelV_list, alumelV_list, color=self.colorL, linewidth=1))
#end def
#--------------------------------------------------------------------------
def save_plot(self, msg):
path = filePath + "/Voltage_Plot.png"
self.canvas.print_figure(path)
#end def
#--------------------------------------------------------------------------
def create_sizer(self):
sizer = wx.GridBagSizer(3,1)
sizer.Add(self.titlePanel, (0, 0), flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.canvas, ( 1,0), flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.hbox1, (2,0), flag=wx.ALIGN_CENTER_HORIZONTAL)
self.SetSizer(sizer)
#end def
#end class
###############################################################################
###############################################################################
class TemperaturePanel(wx.Panel):
"""
GUI Window for plotting temperature data.
"""
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
global filePath
global tsampletempA_list
global sampletempA_list
global tsampletempB_list
global sampletempB_list
global tblocktemp_list
global blocktempA_list
global blocktempB_list
self.create_title("Temperature Panel")
self.init_plot()
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.create_control_panel()
self.create_sizer()
pub.subscribe(self.OnTimeSampleTempA, "Time Sample Temp A")
pub.subscribe(self.OnSampleTempA, "Sample Temp A")
pub.subscribe(self.OnTimeSampleTempB, "Time Sample Temp B")
pub.subscribe(self.OnSampleTempB, "Sample Temp B")
pub.subscribe(self.OnTimeBlockTemp, "Time Block Temp")
pub.subscribe(self.OnBlockTempA, "Block Temp A")
pub.subscribe(self.OnBlockTempB, "Block Temp B")
# For saving the plots at the end of data acquisition:
pub.subscribe(self.save_plot, "Save_All")
self.animator = animation.FuncAnimation(self.figure, self.draw_plot, interval=500, blit=False)
#end init
#--------------------------------------------------------------------------
def create_title(self, name):
self.titlePanel = wx.Panel(self, -1)
title = wx.StaticText(self.titlePanel, label=name)
font_title = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
title.SetFont(font_title)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(title, 0, wx.LEFT, 5)
self.titlePanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def create_control_panel(self):
self.xmin_control = BoundControlBox(self, -1, "t min", 0)
self.xmax_control = BoundControlBox(self, -1, "t max", 100)
self.ymin_control = BoundControlBox(self, -1, "T min", 0)
self.ymax_control = BoundControlBox(self, -1, "T max", 500)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox1.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox1.Add(self.ymax_control, border=5, flag=wx.ALL)
#end def
#--------------------------------------------------------------------------
def OnTimeSampleTempA(self, msg):
self.tsampletempA = float(msg)
#end def
#--------------------------------------------------------------------------
def OnSampleTempA(self, msg):
self.sampletempA = float(msg)
sampletempA_list.append(self.sampletempA)
tsampletempA_list.append(self.tsampletempA)
#end def
#--------------------------------------------------------------------------
def OnTimeSampleTempB(self, msg):
self.tsampletempB = float(msg)
#end def
#--------------------------------------------------------------------------
def OnSampleTempB(self, msg):
self.sampletempB = float(msg)
sampletempB_list.append(self.sampletempB)
tsampletempB_list.append(self.tsampletempB)
#end def
#--------------------------------------------------------------------------
def OnTimeBlockTemp(self, msg):
self.tblocktemp = float(msg)
tblocktemp_list.append(self.tblocktemp)
blocktempA_list.append(self.blocktempA)
blocktempB_list.append(self.blocktempB)
#end def
#--------------------------------------------------------------------------
def OnBlockTempA(self, msg):
self.blocktempA = float(msg)
#end def
#--------------------------------------------------------------------------
def OnBlockTempB(self, msg):
self.blocktempB = float(msg)
#end def
#--------------------------------------------------------------------------
def init_plot(self):
self.dpi = 100
self.colorSTA = 'r'
self.colorSTB = 'b'
self.colorBTA = 'm'
self.colorBTB = 'c'
self.figure = Figure((6,2), dpi=self.dpi)
self.subplot = self.figure.add_subplot(111)
self.lineSTA, = self.subplot.plot(tsampletempA_list,sampletempA_list, color=self.colorSTA, linewidth=1)
self.lineSTB, = self.subplot.plot(tsampletempB_list,sampletempB_list, color=self.colorSTB, linewidth=1)
self.lineBTA, = self.subplot.plot(tblocktemp_list,blocktempA_list, color=self.colorBTA, linewidth=1)
self.lineBTB, = self.subplot.plot(tblocktemp_list,blocktempB_list, color=self.colorBTB, linewidth=1)
self.legend = self.figure.legend( (self.lineSTA, self.lineBTA, self.lineSTB, self.lineBTB), (r"$T_A$ (sample)",r"$T_A$ (block)",r"$T_B$ (sample)",r"$T_B$ (block)"), (0.15,0.50),fontsize=8)
#self.subplot.text(0.05, .95, r'$X(f) = \mathcal{F}\{x(t)\}$', \
#verticalalignment='top', transform = self.subplot.transAxes)
#end def
#--------------------------------------------------------------------------
def draw_plot(self,i):
self.subplot.clear()
#self.subplot.set_title("temperature vs. time", fontsize=12)
self.subplot.set_ylabel(r"temperature ($\degree C$)", fontsize = 8)
self.subplot.set_xlabel("time (s)", fontsize = 8)
# Adjustable scale:
if self.xmax_control.is_auto():
xmax = max(tsampletempA_list+tsampletempB_list+tblocktemp_list)
else:
xmax = float(self.xmax_control.manual_value())
if self.xmin_control.is_auto():
xmin = 0
else:
xmin = float(self.xmin_control.manual_value())
if self.ymin_control.is_auto():
minT = min(sampletempA_list+sampletempB_list+blocktempA_list+blocktempB_list)
ymin = minT - abs(minT)*0.3
else:
ymin = float(self.ymin_control.manual_value())
if self.ymax_control.is_auto():
maxT = max(sampletempA_list+sampletempB_list+blocktempA_list+blocktempB_list)
ymax = maxT + abs(maxT)*0.3
else:
ymax = float(self.ymax_control.manual_value())
self.subplot.set_xlim([xmin, xmax])
self.subplot.set_ylim([ymin, ymax])
pylab.setp(self.subplot.get_xticklabels(), fontsize=8)
pylab.setp(self.subplot.get_yticklabels(), fontsize=8)
self.lineSTA, = self.subplot.plot(tsampletempA_list,sampletempA_list, color=self.colorSTA, linewidth=1)
self.lineSTB, = self.subplot.plot(tsampletempB_list,sampletempB_list, color=self.colorSTB, linewidth=1)
self.lineBTA, = self.subplot.plot(tblocktemp_list,blocktempA_list, color=self.colorBTA, linewidth=1)
self.lineBTB, = self.subplot.plot(tblocktemp_list,blocktempB_list, color=self.colorBTB, linewidth=1)
return (self.lineSTA, self.lineSTB, self.lineBTA, self.lineBTB)
#end def
#--------------------------------------------------------------------------
def save_plot(self, msg):
path = filePath + "/Temperature_Plot.png"
self.canvas.print_figure(path)
#end def
#--------------------------------------------------------------------------
def create_sizer(self):
sizer = wx.GridBagSizer(3,1)
sizer.Add(self.titlePanel, (0, 0),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.canvas, ( 1,0),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.hbox1, (2,0),flag=wx.ALIGN_CENTER_HORIZONTAL)
self.SetSizer(sizer)
#end def
#end class
###############################################################################
###############################################################################
class Frame(wx.Frame):
"""
Main frame window in which GUI resides
"""
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.init_UI()
self.create_statusbar()
self.create_menu()
pub.subscribe(self.update_statusbar, "Status Bar")
#end init
#--------------------------------------------------------------------------
def init_UI(self):
self.SetBackgroundColour('#E0EBEB')
self.userpanel = UserPanel(self, size=wx.DefaultSize)
self.statuspanel = StatusPanel(self,size=wx.DefaultSize)
self.voltagepanel = VoltagePanel(self, size=wx.DefaultSize)
self.temperaturepanel = TemperaturePanel(self, size=wx.DefaultSize)
self.statuspanel.SetBackgroundColour('#ededed')
sizer = wx.GridBagSizer(2, 3)
sizer.Add(self.userpanel, (0,0),flag=wx.ALIGN_CENTER_HORIZONTAL, span = (2,1))
sizer.Add(self.statuspanel, (0,2),flag=wx.ALIGN_CENTER_HORIZONTAL, span = (2,1))
sizer.Add(self.voltagepanel, (0,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.temperaturepanel, (1,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Fit(self)
self.SetSizer(sizer)
self.SetTitle('High Temp Seebeck GUI')
self.Centre()
#end def
#--------------------------------------------------------------------------
def create_menu(self):
# Menu Bar with File, Quit
menubar = wx.MenuBar()
fileMenu = wx.Menu()
qmi = wx.MenuItem(fileMenu, APP_EXIT, '&Quit\tCtrl+Q')
#qmi.SetBitmap(wx.Bitmap('exit.png'))
fileMenu.AppendItem(qmi)
self.Bind(wx.EVT_MENU, self.onQuit, id=APP_EXIT)
menubar.Append(fileMenu, 'File')
self.SetMenuBar(menubar)
#end def
#--------------------------------------------------------------------------
def onQuit(self, e):
global abort_ID
abort_ID=1
self.Destroy()
self.Close()
sys.stdout.close()
sys.stderr.close()
#end def
#--------------------------------------------------------------------------
def create_statusbar(self):
self.statusbar = ESB.EnhancedStatusBar(self, -1)
self.statusbar.SetSize((-1, 23))
self.statusbar.SetFieldsCount(8)
self.SetStatusBar(self.statusbar)
self.space_between = 10
### Create Widgets for the statusbar:
# Status:
self.status_text = wx.StaticText(self.statusbar, -1, "Ready")
self.width0 = 105
# Placer 1:
placer1 = wx.StaticText(self.statusbar, -1, " ")
# Title:
#measurement_text = wx.StaticText(self.statusbar, -1, "Measurement Indicators:")
#boldFont = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
#measurement_text.SetFont(boldFont)
#self.width1 = measurement_text.GetRect().width + self.space_between
# PID Tolerance:
pidTol_text = wx.StaticText(self.statusbar, -1, "Within Tolerance:")
self.width2 = pidTol_text.GetRect().width + self.space_between
self.indicator_tol = wx.StaticText(self.statusbar, -1, "-")
self.width3 = 25
# Stability Threshold:
stableThresh_text = wx.StaticText(self.statusbar, -1, "Within Stability Threshold:")
self.width4 = stableThresh_text.GetRect().width + 5
self.indicator_stable = wx.StaticText(self.statusbar, -1, "-")
self.width5 = self.width3
# Placer 2:
placer2 = wx.StaticText(self.statusbar, -1, " ")
# Version:
version_label = wx.StaticText(self.statusbar, -1, "Version: %s" % version)
self.width8 = version_label.GetRect().width + self.space_between
# Set widths of each piece of the status bar:
self.statusbar.SetStatusWidths([self.width0, 50, self.width2, self.width3, self.width4, self.width5, -1, self.width8])
### Add the widgets to the status bar:
# Status:
self.statusbar.AddWidget(self.status_text, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
# Placer 1:
self.statusbar.AddWidget(placer1)
# Title:
#self.statusbar.AddWidget(measurement_text, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
# PID Tolerance:
self.statusbar.AddWidget(pidTol_text, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
self.statusbar.AddWidget(self.indicator_tol, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
# Stability Threshold:
self.statusbar.AddWidget(stableThresh_text, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
self.statusbar.AddWidget(self.indicator_stable, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
# Placer 2
self.statusbar.AddWidget(placer2)
# Version:
self.statusbar.AddWidget(version_label, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
#end def
#--------------------------------------------------------------------------
def update_statusbar(self, msg):
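        # msg is either one of the overall status strings handled below, or a
        # two-element sequence: (tolerance indicator, stability indicator).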
string = msg
# Status:
if string == 'Running' or string == 'Finished, Ready' or string == 'Exception Occurred' or string=='Checking':
self.status_text.SetLabel(string)
self.status_text.SetBackgroundColour(wx.NullColour)
if string == 'Exception Occurred':
self.status_text.SetBackgroundColour("RED")
#end if
#end if
else:
tol = string[0]
stable = string[1]
# PID Tolerance indicator:
self.indicator_tol.SetLabel(tol)
if tol == 'OK':
self.indicator_tol.SetBackgroundColour("GREEN")
#end if
else:
self.indicator_tol.SetBackgroundColour("RED")
#end else
# Stability Threshold indicator:
self.indicator_stable.SetLabel(stable)
if stable == 'OK':
self.indicator_stable.SetBackgroundColour("GREEN")
#end if
else:
self.indicator_stable.SetBackgroundColour("RED")
#end else
#end else
#end def
#end class
###############################################################################
###############################################################################
class App(wx.App):
"""
App for initializing program
"""
#--------------------------------------------------------------------------
def OnInit(self):
self.frame = Frame(parent=None, title="High Temp Seebeck GUI", size=(1280,1280))
self.frame.Show()
setup = Setup()
return True
#end init
#end class
###############################################################################
#==============================================================================
if __name__=='__main__':
app = App()
app.MainLoop()
#end if
| gpl-3.0 |
wolfd/ocular | scripts/stereo.py | 1 | 5432 | #!/usr/bin/env python
import numpy as np
import cv2
import glob
import argparse
import os
import pykitti # version 0.1.2
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from utils.ply_export import write_ply
from features.sift_image import SIFTImage
from utils.camera_transform_guesser import rigid_transform_3D
DEBUG_SIFT_3D = False
DEBUG_STEREO_3D = False
if __name__ == '__main__':
print('loading')
dataset = pykitti.odometry(
'/home/wolf/kitti/dataset',
'00',
frame_range=range(0, 100, 2)
)
dataset.load_calib()
dataset.load_timestamps()
dataset.load_poses()
dataset.load_gray()
window_size = 5
min_disp = 16
num_disp = 112 - min_disp
cam = dataset.calib.K_cam0
dist_b = dataset.calib.b_gray
cam_left = np.concatenate((
cam,
np.matrix([0, 0, 0]).T
), axis=1)
cam_right = np.concatenate((
cam,
np.matrix([dist_b * cam[0, 0], 0, 0]).T
), axis=1)
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
cam, 0,
cam, 0,
(1241, 376),
np.matrix([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
]),
np.array([dist_b, 0, 0])
)
stereo = cv2.StereoSGBM_create(
minDisparity=min_disp,
numDisparities=num_disp,
blockSize=16,
P1=8*3*window_size**2,
P2=32*3*window_size**2,
disp12MaxDiff=1,
uniquenessRatio=10,
speckleWindowSize=100,
speckleRange=32
)
initial_position = np.array([0., 0., 0., 1.])
initial_transform = np.matrix([
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]
])
current_transform = initial_transform
cur_t = np.array([0., 0., 0.])
cur_rot = np.matrix([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
])
path = []
real_poses = np.array(dataset.T_w_cam0)[dataset.frame_range]
pose_index = 0
last_points = None
for pair in dataset.gray:
print('computing disparity')
left = np.uint8(pair.left * 255.)
right = np.uint8(pair.right * 255.)
sift_left = SIFTImage(left)
sift_right = SIFTImage(right)
disp = stereo.compute(
left,
right
).astype(np.float32) / 16.0
# left gray camera (they're all the same numbers in this dataset)
calibration_matrix = dataset.calib.K_cam0
points = cv2.reprojectImageTo3D(disp, Q)
if last_points is not None:
last_pts, now_pts = last_sift_left.correspondences(sift_left)
# make pts integers for easy indexing
last_pts_i = last_pts.astype(np.uint32)
now_pts_i = now_pts.astype(np.uint32)
if DEBUG_SIFT_3D:
out_points = last_points[last_pts_i[:, 1], last_pts_i[:, 0]]
out_gray = last_sift_left.image[
last_pts_i[:, 1], last_pts_i[:, 0]
]
out_file = 'out-sift-0.ply'
write_ply(out_file, out_points, out_gray)
out_points = points[now_pts_i[:, 1], now_pts_i[:, 0]]
out_gray = left[now_pts_i[:, 1], now_pts_i[:, 0]]
out_file = 'out-sift-1.ply'
write_ply(out_file, out_points, out_gray)
# get the 3D coordinates of the matched features
last_3d_coords = last_points[last_pts_i[:, 1], last_pts_i[:, 0]]
now_3d_coords = points[now_pts_i[:, 1], now_pts_i[:, 0]]
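            # Estimate the frame-to-frame rigid transform from the matched 3D points
            # and accumulate it into the running camera pose (rotation + translation).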
ret_R, ret_t = rigid_transform_3D(
last_3d_coords[-50:],
now_3d_coords[-50:]
)
cur_t = cur_t + np.dot(cur_rot, ret_t)
cur_rot = np.dot(ret_R, cur_rot)
current_transform = np.concatenate((cur_rot, cur_t.T), axis=1)
np.set_printoptions(suppress=True)
print('Det: {}'.format(np.linalg.det(current_transform[:, :3])))
print(current_transform)
print(real_poses[pose_index])
pose_index += 1
path.append(cur_t.reshape(-1, 3))
last_points = points
last_sift_left = sift_left
last_sift_right = sift_right
if DEBUG_STEREO_3D:
mask = disp > disp.min()
out_points = points[mask]
out_gray = left[mask]
out_file = 'out.ply'
write_ply(out_file, out_points, out_gray)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal')
np_path = np.array(path).reshape(-1, 3)
X = np_path[:, 0]
Y = np_path[:, 1]
Z = np_path[:, 2]
real_path = np.dot(real_poses, [0, 0, 0, 1])
ax.plot_wireframe(X, Y, Z)
    # use the translation component of each ground-truth pose (real_path), not the raw 4x4 pose matrices
    RX = real_path[:, 0]
    RY = real_path[:, 1]
    RZ = real_path[:, 2]
ax.plot_wireframe(RZ, RY, RX)
max_range = np.array([
X.max() - X.min(),
Y.max() - Y.min(),
Z.max() - Z.min()
]).max() / 2.0
mid_x = (X.max()+X.min()) * 0.5
mid_y = (Y.max()+Y.min()) * 0.5
mid_z = (Z.max()+Z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
| mit |
mavlyutovrus/interval_index | python/graphs_7_hdfs_indices.py | 1 | 2778 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib
from pylab import *
import numpy
from copy_reg import remove_extension
from heapq import heappush
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
colors = ["red", "green", "blue", "orange", "brown", "black", "silver", "aqua", "purple"]
suffix = ""
file_type = ""
def draw_scatter_plot(algo2results):
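    """Plot one line per algorithm from an {algorithm: [(x, y), ...]} results dict."""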
fig = figure(figsize=(8, 6), dpi=80)
grid(b=True, which='major', color='gray', axis="both", linestyle='--', zorder=-1)
font_size = 20
algos = [algo for algo in algo2results.keys()]
algos.sort()
print algos
print colors
#plt.xlim([0, 1000])
plt.ylim([0, 100])
for algo_index in xrange(len(algos)):
algo = algos[algo_index]
x_values = [x for x,y in algo2results[algo]]
y_values = [y for x,y in algo2results[algo]]
line, = plt.plot(x_values, y_values, lw=3, color=colors[algo_index])
line.set_zorder(1)
"""
plt.xlim([10000, 10**7])
plt.xscale('log')
plt.ylim([0, 2])
ylabel = plt.ylabel("Time per 100K queries [s]")
ylabel.set_fontsize(font_size)
xlabel = plt.xlabel("Dataset size")
xlabel.set_fontsize(font_size)
for ytick in plt.yticks()[1]:
ytick.set_fontsize(font_size)
for xtick in plt.xticks()[1]:
xtick.set_fontsize(font_size)
plt.tight_layout()
"""
plt.show()
def draw_legend(algos):
    font_size = 20
    fig = figure(figsize=(8, 6), dpi=80)
    p1 = plt.bar(range(len(algos)), range(len(algos)), 1.0, color="#7FCA9F")
for algo_index in xrange(len(algos)):
p1[algo_index].set_color(colors[algo_index])
fig = figure(figsize=(12, 6), dpi=80)
desc = [algo for algo in algos]
legend = plt.legend( p1, desc, shadow=False, loc=1, fontsize=font_size)
legend.draw_frame(True)
savefig("../graphs/test_results/legend" + file_type, transparent="True", pad_inches=0)
def calc_avg_minus_extremes(values):
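    """Return (mean, 95% confidence margin) after dropping the 3 smallest and 3 largest values."""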
values.sort()
quartile = len(values) / 4
values = values[3:-3]
import numpy, math
margin_err = 1.96 * numpy.std(values) / math.sqrt(len(values))
return float(sum(values)) / len(values), margin_err
RESPONSE_SIZE_POS = 0
TIME_RESULT_POS = 3
MEM_CONSUMPTION_POS = 1
QUERIES_COUNT = 200000
if 1:
trends = {}
for line in open("../test_results/7_hdfs_indices.txt"):
chunks = line.split()
algo = chunks[0]
if chunks[1] == "read_size":
algo += "_" + chunks[2].zfill(5)
x = int(chunks[-5])
y = float(chunks[-1])
trends.setdefault(algo, []).append((x, y / x))
for key in trends.keys():
trends[key].sort()
draw_scatter_plot(trends)
| apache-2.0 |
dfm/batman | docs/quickstart.py | 1 | 1892 | # The batman package: fast computation of exoplanet transit light curves
# Copyright (C) 2015 Laura Kreidberg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import batman
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
matplotlib.rcParams.update({'font.size':14})
params = batman.TransitParams()
params.t0 = 0. #time of periastron passage (for eccentric orbits), OR
#mid-transit time (for circular orbits)
params.per = 1. #orbital period
params.rp = 0.1 #planet radius (in units of stellar radii)
params.a = 15. #semi-major axis (in units of stellar radii)
params.inc = 87. #orbital inclination (in degrees)
params.ecc = 0. #eccentricity
params.w = 90. #longitude of periastron (in degrees)
params.u = [0.1, 0.3] #limb darkening coefficients
params.limb_dark = "quadratic" #limb darkening model
t = np.linspace(-0.025, 0.025, 1000) #times at which to calculate light curve
m = batman.TransitModel(params, t) #initializes model
flux = m.light_curve(params)
plt.plot(t, flux)
plt.xlabel("Time from central transit (days)")
plt.ylabel("Relative flux")
plt.ylim((0.989, 1.001))
plt.savefig("lc.png")
plt.show()
| gpl-3.0 |
aabadie/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 15 | 13124 | import sys
import numpy as np
from nose import SkipTest
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# import reload
version = sys.version_info
if version[0] == 3:
# Python 3+ import for reload. Builtin in Python2
if version[1] == 3:
reload = None
else:
from importlib import reload
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
tested_length = min(clf_lda_svd.explained_variance_ratio_.shape[0],
clf_lda_eigen.explained_variance_ratio_.shape[0])
# NOTE: clf_lda_eigen.explained_variance_ratio_ is not of n_components
# length. Make it the same length as clf_lda_svd.explained_variance_ratio_
# before comparison.
    assert_array_almost_equal(
        clf_lda_svd.explained_variance_ratio_,
        clf_lda_eigen.explained_variance_ratio_[:tested_length])
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
if reload is None:
raise SkipTest("Can't reload module on Python3.3")
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
anand-c-goog/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 5 | 4404 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
class FeatureEngineeringFunctionTest(tf.test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {"x": tf.constant([1.])}, {"y": tf.constant([11.])}
def feature_engineering_fn(features, targets):
_, _ = features, targets
return {
"transformed_x": tf.constant([9.])
}, {
"transformed_y": tf.constant([99.])
}
def model_fn(features, targets):
# dummy variable:
_ = tf.Variable([0.])
_ = targets
predictions = features["transformed_x"]
loss = tf.constant([2.])
return predictions, loss, tf.no_op()
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {"x": tf.constant([1.])}, {"y": tf.constant([11.])}
def feature_engineering_fn(features, targets):
_, _ = features, targets
return {"x": tf.constant([9.])}, {"y": tf.constant([99.])}
def model_fn(features, targets):
# dummy variable:
_ = tf.Variable([0.])
_ = targets
predictions = features["x"]
loss = tf.constant([2.])
return predictions, loss, tf.no_op()
estimator_with_fe_fn = tf.contrib.learn.Estimator(
model_fn=model_fn,
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = tf.contrib.learn.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(tf.test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
def custom_optimizer():
return tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
kathleenleeper/div3 | bibliometrics/gender_bib_distribution.py | 1 | 3380 |
# coding: utf-8
# In[1]:
from __future__ import division
"""gendering"""
from genderComputer.genderComputer import GenderComputer
"""bibtex parsing"""
import os
import bibtexparser as b #module for bibtexin'
from bibtexparser.bparser import BibTexParser #import to add customization
from bibtexparser.customization import *
"""plotting functions"""
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
"""date time functions """
from datetime import datetime #idk bring in the system date or whatever
"""csv"""
import csv
# In[2]:
today = datetime.today()
bib = 'CriticalOpenNeuro.bib' #bring that bib file in
gc = GenderComputer(os.path.abspath('genderComputer/nameLists')) #make gendercomputer
# In[3]:
def customizations(record):
"""Use some functions delivered by the library
:param record: a record
:returns: -- customized record
"""
record = type(record)
record = doi(record)
record = convert_to_unicode(record)
record = author(record)
return record
# In[4]:
def parseFile(bib_file):
"""parse the bib file
:param bib_file: bibtex file to be parsed
:returns: -- a bibtex file object
"""
with open(bib_file) as bibtex_file:
parser = BibTexParser()
parser.homogenize = True
parser.customization = customizations
data = b.load(bibtex_file, parser = parser)
return data
# In[5]:
women = 0
men = 0
uni = 0
notav = 0
auCount = 0
unavailable = []
# In[6]:
def countGender(ts=True):
"""take the bib database and count genders of authors
"""
global auCount
global notav
global uni
global men
global women
global unavailable
for entry in data.entries:
title = entry["title"]
        if "author" not in entry:
            if ts == True:
                print "no author in", title
            continue  # skip entries without authors instead of reusing the previous entry's author list
        authors = entry["author"]
for j in authors:
auCount += 1
gender = gc.resolveGender(j, None) #resolve gender, yay
if gender == 'male':
men += 1
elif gender == 'female':
women += 1
elif gender == 'unisex':
uni += 1
            else:
                notav += 1
                unavailable.append(j)  # record unresolved names so the troubleshooting CSV below is populated
                if ts == True:
                    print j, title
# In[7]:
data = parseFile(bib) #run the parse file
countGender(False)
# In[8]:
"""writing names unassigned to a file for troubleshooting"""
with open('unavailable_gender', 'wb') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(unavailable)
# In[9]:
stats = {'Women':women, 'Men':men, 'Unisex':uni, 'Not Available':notav}
percents = {'Women':women, 'Men':men, 'Unisex':uni, 'Not Available':notav}
# In[10]:
for key in stats:
value = stats[key]
    percent = (value / auCount * 100) if auCount else 0  # guard against division by zero when no authors were counted
percents[key] = percent
# In[11]:
print stats
print percents
print auCount
# In[20]:
plt.bar(range(len(stats)), percents.values(), align='center', color="#2aa198")
plt.xticks(range(len(percents)), percents.keys(), color="#657b83")
plt.xlabel('Gender Assigned (generated ' + str(today) +')', color="#073642")
plt.ylabel('Percents', color="#073642")
# In[21]:
plt.savefig('gender_distr.png', bbox_inches='tight',transparent=True)
# In[ ]:
| gpl-3.0 |
sparklingpandas/sparklingpandas | sparklingpandas/pstats.py | 4 | 1595 | """Provide a way to work with panda data frames in Spark"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.utils import add_pyspark_path
add_pyspark_path()
import pandas
class PStats:
"""A object to wrap the stats/aggregation values"""
def __init__(self, dataframe):
self._df = dataframe.collect()
def __getitem__(self, key):
return PStatsOnColumn(self._df, key)
class PStatsOnColumn:
def __init__(self, df, key):
self._df = df
self._key = key
def min(self):
return self._df["MIN(" + self._key + ")"][0]
def max(self):
return self._df["MAX(" + self._key + ")"][0]
def avg(self):
return self._df["AVG(" + self._key + ")"][0]
    def sum(self):
        # Assumes the aggregated result exposes a "SUM(<column>)" entry alongside MIN/MAX/AVG/COUNT.
        return self._df["SUM(" + self._key + ")"][0]
    def count(self):
        return self._df["COUNT(" + self._key + ")"][0]
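# Hypothetical usage sketch (illustration only; the names below are not part of this module):
#
#   stats = PStats(aggregated_dataframe)
#   minimum = stats["some_column"].min()
#
# This assumes the collected frame exposes entries named like "MIN(some_column)".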
| apache-2.0 |
JohnGriffiths/dipy | doc/examples/streamline_length.py | 8 | 6039 | """
=====================================
Streamline length and size reduction
=====================================
This example shows how to calculate the lengths of a set of streamlines and
also how to compress the streamlines without considerably reducing their
lengths or overall shape.
A streamline in Dipy is represented as a numpy array of size
:math:`(N \times 3)` where each row of the array represents a 3D point of the
streamline. A set of streamlines is represented with a list of
numpy arrays of size :math:`(N_i \times 3)` for :math:`i=1:M`, where :math:`M` is
the number of streamlines in the set. (A tiny illustration of this layout follows
the imports below.)
"""
import numpy as np
from dipy.tracking.utils import length
from dipy.tracking.metrics import downsample
from dipy.tracking.distances import approx_polygon_track
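# A minimal sketch (added for illustration) of the representation described in
# the introduction: a bundle is simply a Python list of (N_i, 3) arrays, one
# array of 3D points per streamline.
toy_streamline_1 = np.array([[0., 0., 0.], [1., 0., 0.], [2., 1., 0.]])  # 3 points
toy_streamline_2 = np.array([[0., 1., 0.], [1., 1., 1.]])  # 2 points
toy_bundle = [toy_streamline_1, toy_streamline_2]  # a set of 2 streamlines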
"""
Let's first create a simple simulation of a bundle of streamlines using
a cosine function.
"""
def simulated_bundles(no_streamlines=50, n_pts=100):
t = np.linspace(-10, 10, n_pts)
bundle = []
for i in np.linspace(3, 5, no_streamlines):
pts = np.vstack((np.cos(2 * t/np.pi), np.zeros(t.shape) + i, t )).T
bundle.append(pts)
start = np.random.randint(10, 30, no_streamlines)
end = np.random.randint(60, 100, no_streamlines)
bundle = [10 * streamline[start[i]:end[i]]
for (i, streamline) in enumerate(bundle)]
bundle = [np.ascontiguousarray(streamline) for streamline in bundle]
return bundle
bundle = simulated_bundles()
print('This bundle has %d streamlines' % len(bundle))
"""
This bundle has 50 streamlines.
Using the ``length`` function we can retrieve the lengths of each streamline.
Below we show the histogram of the lengths of the streamlines.
"""
lengths = list(length(bundle))
import matplotlib.pyplot as plt
fig_hist, ax = plt.subplots(1)
ax.hist(lengths, color='burlywood')
ax.set_xlabel('Length')
ax.set_ylabel('Count')
plt.show()
plt.legend()
plt.savefig('length_histogram.png')
"""
.. figure:: length_histogram.png
:align: center
**Histogram of lengths of the streamlines**
``length`` will return the length in the units of the coordinate system that
the streamlines are currently in. So, if the streamlines are in world
coordinates, the lengths will be in millimeters (mm). If, for example, the
streamlines are in native image coordinates with an isotropic voxel size of
2mm, you will need to multiply the lengths by 2 for them to correspond to mm.
In this example we process simulated data without units; however, this is good
to keep in mind when you calculate lengths with real data. A small illustration
of such a conversion is included as an aside just below.
Next, let's find the number of points that each streamline has.
"""
n_pts = [len(streamline) for streamline in bundle]
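# Aside (added illustration, not part of the original example): had these
# streamlines been in native image coordinates with hypothetical 2 mm
# isotropic voxels, the unit conversion described above would simply be:
voxel_size_mm = 2.0  # illustrative assumption only
lengths_in_mm = [voxel_size_mm * l for l in lengths]
# The simulated bundle here is unitless, so ``lengths`` is used as is below.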
"""
Often, streamlines are represented with more points than are actually
necessary for specific applications. Also, sometimes every streamline has a
different number of points, which can be a problem for some algorithms.
The function ``downsample`` can be used to set the number of points of a
streamline to a specific number and, at the same time, enforce that all the
segments of the streamline have equal length.
"""
bundle_downsampled = [downsample(s, 12) for s in bundle]
n_pts_ds = [len(s) for s in bundle_downsampled]
"""
Alternatively, the function ``approx_polygon_track`` reduces the number of
points so that there are more points in curvy regions and fewer points in
less curvy regions. In contrast with ``downsample``, it does not enforce that
segments have equal size.
"""
bundle_downsampled2 = [approx_polygon_track(s, 0.25) for s in bundle]
n_pts_ds2 = [len(streamline) for streamline in bundle_downsampled2]
"""
Both ``downsample`` and ``approx_polygon_track`` can be thought of as methods
for lossy compression of streamlines.
"""
from dipy.viz import fvtk
ren = fvtk.ren()
ren.SetBackground(*fvtk.colors.white)
bundle_actor = fvtk.streamtube(bundle, fvtk.colors.red, linewidth=0.3)
fvtk.add(ren, bundle_actor)
bundle_actor2 = fvtk.streamtube(bundle_downsampled, fvtk.colors.red, linewidth=0.3)
bundle_actor2.SetPosition(0, 40, 0)
bundle_actor3 = fvtk.streamtube(bundle_downsampled2, fvtk.colors.red, linewidth=0.3)
bundle_actor3.SetPosition(0, 80, 0)
fvtk.add(ren, bundle_actor2)
fvtk.add(ren, bundle_actor3)
fvtk.camera(ren, pos=(0, 0, 0), focal=(30, 0, 0))
fvtk.record(ren, out_path='simulated_cosine_bundle.png', size=(900, 900))
"""
.. figure:: simulated_cosine_bundle.png
:align: center
**Initial bundle (bottom), downsampled to 12 equidistant points (middle), downsampled non-equidistantly (top)**
From the figure above we can see that all 3 bundles look quite similar. However,
when we plot the histogram of the number of points used for each streamline, it
becomes obvious that we have managed to considerably reduce the size of the
initial dataset.
"""
import matplotlib.pyplot as plt
fig_hist, ax = plt.subplots(1)
ax.hist(n_pts, color='r', histtype='step', label='initial')
ax.hist(n_pts_ds, color='g', histtype='step', label='downsample (12)')
ax.hist(n_pts_ds2, color='b', histtype='step', label='approx_polygon_track (0.25)')
ax.set_xlabel('Number of points')
ax.set_ylabel('Count')
plt.legend()
plt.savefig('n_pts_histogram.png')
plt.show()
"""
.. figure:: n_pts_histogram.png
:align: center
**Histogram of the number of points of the streamlines**
Finally, we can also show that the lengths of the streamlines haven't changed
considerably after applying the two methods of downsampling.
"""
lengths_downsampled = list(length(bundle_downsampled))
lengths_downsampled2 = list(length(bundle_downsampled2))
fig, ax = plt.subplots(1)
ax.plot(lengths, color='r', label='initial')
ax.plot(lengths_downsampled, color='g', label='downsample (12)')
ax.plot(lengths_downsampled2, color='b', label='approx_polygon_track (0.25)')
ax.set_xlabel('Streamline ID')
ax.set_ylabel('Length')
plt.legend()
plt.savefig('lengths_plots.png')
plt.show()
"""
.. figure:: lengths_plots.png
:align: center
**Lengths of each streamline for every one of the 3 bundles**
"""
| bsd-3-clause |
google-research/open-covid-19-data | src/pipeline/join_data.py | 1 | 2228 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pandas as pd
import os
import load_data
import config
LOCATIONS_PATH = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')), 'data/exports/locations/locations.csv')
time_series_data_types = config.get_time_series_data_types()
def get_time_series_data_by_type(config_dict):
time_series_data = {}
for data_type in time_series_data_types:
data_type_data = load_data.load_data_type(data_type, config_dict)
if data_type_data is not None:
time_series_data[data_type] = data_type_data
return time_series_data
def get_time_series_df(config_dict):
joined_df = None
time_series_data_by_type = get_time_series_data_by_type(config_dict)
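    # Outer-join the per-data-type frames on (date, region_code) so that dates
    # present for one data type but missing for another are still kept.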
for df in time_series_data_by_type.values():
if joined_df is None:
joined_df = df
else:
joined_df = joined_df.merge(df, on=['date', 'region_code'], how='outer')
if joined_df is None:
logging.warning('No dataframe loaded for any data type, get_time_series_df returning None.')
return None
else:
location_names_df = pd.read_csv(LOCATIONS_PATH)
location_names_df = location_names_df[['region_code', 'region_name']]
time_series_df = joined_df.merge(location_names_df, on=['region_code'], how='inner')
identifier_cols = ['region_code', 'region_name', 'date']
time_series_df_cols = [c for c in time_series_df.columns if c not in identifier_cols]
time_series_df = time_series_df[identifier_cols + time_series_df_cols]
time_series_df.sort_values(by=['region_code', 'date'])
return time_series_df
| apache-2.0 |
cdd1969/pygwa | lib/flowchart/nodes/n_20_fourierfit/node_fourierfit.py | 1 | 5881 | #!/usr/bin python
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import pandas as pd
from pyqtgraph import BusyCursor
from lib.functions.general import isNumpyDatetime, isNumpyNumeric
from lib.flowchart.nodes.generalNode import NodeWithCtrlWidget, NodeCtrlWidget
from lib.functions.fourier import pandas_fourier_analysis
class fourierFitNode(NodeWithCtrlWidget):
"""Decompose Sinusoidal timeseries curve with Fast Fourier Transformaions"""
nodeName = "FFT"
uiTemplate = [
{'title': 'Signal', 'name': 'sig', 'type': 'list', 'value': None, 'default': None, 'values': [None], 'tip': 'Name of the column with waterlevel data.'},
{'title': 'Datetime', 'name': 'datetime', 'type': 'list', 'value': None, 'default': None, 'values': [None], 'tip': 'Name of the column with datetime (or __index__)'},
{'title': 'Number of Waves', 'name': 'N_MAX_POW', 'type': 'int', 'value': 1, 'limits': (1, 10e10), 'tip': 'Number of partial waves used to generate equation. Partial waves with most powerful frequencies are selected at first. See docs'},
{'title': 'Slice datetime', 'name': 'ranges', 'type': 'bool', 'value': False},
{'title': 'Datetime Start', 'name': 't0', 'type': 'str', 'value': '2015-12-31 00:00:00', 'default': '2015-12-31 00:00:00', 'tip': 'start of the slice region'},
{'title': 'Datetime Stop', 'name': 't1', 'type': 'str', 'value': '2016-01-30 00:00:00', 'default': '2016-01-30 00:00:00', 'tip': 'end of the slice region'},
{'title': 'Generated Equation', 'name': 'eq', 'type': 'text', 'value': '', 'tip': 'This equation is generated after processing. You may copy it to buffer.\nIf you want to access parameters independently consider opening table that is stored in terminal `params`'},
{'title': 'Display plot', 'name': 'plot', 'type': 'action'},
]
def __init__(self, name, parent=None):
terms = {'In': {'io': 'in'}, 'params': {'io': 'out'}, 'f(t)': {'io': 'out'}}
super(fourierFitNode, self).__init__(name, parent=parent, terminals=terms, color=(250, 250, 150, 150))
self._PLOT_REQUESTED = False
self.fig = None
self._df_id = None
def _createCtrlWidget(self, **kwargs):
return fourierFitNodeCtrlWidget(**kwargs)
def process(self, In):
df = In
if df is None:
return
self.CW().param('eq').setValue('')
if self._df_id != id(df):
#print 'df new'
self._df_id = id(df)
self.CW().disconnect_valueChanged2upd(self.CW().param('datetime'))
self.CW().disconnect_valueChanged2upd(self.CW().param('sig'))
colname = [col for col in df.columns if isNumpyDatetime(df[col].dtype)]
self.CW().param('datetime').setLimits(colname)
colname = [col for col in df.columns if isNumpyNumeric(df[col].dtype)]
self.CW().param('sig').setLimits(colname)
self.CW().connect_valueChanged2upd(self.CW().param('datetime'))
self.CW().connect_valueChanged2upd(self.CW().param('sig'))
# ------------------------------------------------------
# now update our range selectors
kwargs = self.CW().prepareInputArguments()
t_vals = df[kwargs['datetime']].values
t_min = pd.to_datetime(str(min(t_vals)))
t_max = pd.to_datetime(str(max(t_vals)))
self.CW().disconnect_valueChanged2upd(self.CW().param('t0'))
self.CW().disconnect_valueChanged2upd(self.CW().param('t1'))
self.CW().param('t0').setValue(t_min.strftime('%Y-%m-%d %H:%M:%S'))
self.CW().param('t0').setDefault(t_min.strftime('%Y-%m-%d %H:%M:%S'))
self.CW().param('t1').setValue(t_max.strftime('%Y-%m-%d %H:%M:%S'))
self.CW().param('t1').setDefault(t_max.strftime('%Y-%m-%d %H:%M:%S'))
if self.CW().p['ranges'] is True:
self.CW().connect_valueChanged2upd(self.CW().param('t0'))
self.CW().connect_valueChanged2upd(self.CW().param('t1'))
# get params once again
kwargs = self.CW().prepareInputArguments()
# ------------------------------------------------------
with BusyCursor():
df_out, eq_str, function, self.fig = pandas_fourier_analysis(df, kwargs['sig'], date_name=kwargs['datetime'], ranges=kwargs['ranges'], N_MAX_POW=kwargs['N_MAX_POW'], generate_plot=True)
self.CW().param('eq').setValue(eq_str)
self._PLOT_REQUESTED = False
return {'params': df_out, 'f(t)': function}
def on_plot_requested(self):
self._PLOT_REQUESTED = True
if self.fig:
self.fig.show()
class fourierFitNodeCtrlWidget(NodeCtrlWidget):
def __init__(self, **kwargs):
super(fourierFitNodeCtrlWidget, self).__init__(**kwargs)
self.disconnect_valueChanged2upd(self.param('eq'))
self.param('plot').sigActivated.connect(self._parent.on_plot_requested)
self.param('ranges').sigValueChanged.connect(self.on_rangesChecked)
def on_rangesChecked(self):
if self.p['ranges'] is True:
self.connect_valueChanged2upd(self.param('t0'))
self.connect_valueChanged2upd(self.param('t1'))
else:
self.disconnect_valueChanged2upd(self.param('t0'))
self.disconnect_valueChanged2upd(self.param('t1'))
def prepareInputArguments(self):
kwargs = dict()
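        # note: the trailing 'Z' marks the entered timestamps as UTC so that
        # np.datetime64 does not re-interpret them in the local timezone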
kwargs['ranges'] = (np.datetime64(self.p['t0']+'Z'), np.datetime64(self.p['t1']+'Z')) if self.p['ranges'] else None
kwargs['sig'] = self.p['sig']
kwargs['datetime'] = self.p['datetime']
kwargs['N_MAX_POW'] = self.p['N_MAX_POW']
kwargs['plot'] = False
return kwargs
| gpl-2.0 |
xju2/HZZ_llvv_ws | HZZ_llvv_ws/interpolate_norm_sys.py | 1 | 3630 | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))), '..')))
from HZZ_llvv_ws import helper
import numpy as np
import matplotlib.pyplot as plt
class interpolate:
r""" interpolate normalization systematic. search the input text files based on
the mass and production mode!
"""
def __init__(self, input_dir, prod):
self.input_dir = input_dir
self.prod = prod
        self.masses = [300, 400, 500, 600, 700, 800, 900, 1000, 1200]
self.sys_dic_whole = {}
def get_file_name(self, mass):
return "norm_{}{}_all.txt".format(self.prod, mass)
def run(self):
sys_list = []
for mass in self.masses:
sys_list.append(helper.get_sys(os.path.join(self.input_dir, self.get_file_name(mass))))
        # generate the missing mass points by linear interpolation;
        # the list of nuisance parameters is taken from the 300 GeV input
self.get_interpolation(sys_list)
for mass in range(300, 1210, 10):
file_full_path = os.path.join(self.input_dir, self.get_file_name(mass))
if os.path.exists(file_full_path):
print file_full_path,"is there"
continue
else:
self.make_sys(mass)
print file_full_path,"is written"
def get_interpolation(self, sys_list):
if len(self.masses) != len(sys_list):
print "inputs does not match", len(self.masses), len(sys_list)
sys_dic = sys_list[0]
for key, np_dic in sys_dic.iteritems():
# loop each section, i.e. category
if key not in self.sys_dic_whole:
self.sys_dic_whole[key] = {}
for nuisance in np_dic.keys():
# loop over systematic
# for each sys, get the value for each masss.
down_list = []
up_list = []
for imass in range(len(self.masses)):
try:
down_value = sys_list[imass][key][nuisance][0]
up_value = sys_list[imass][key][nuisance][1]
except KeyError:
down_value = 0.9999
up_value = 1.0001
down_list.append(down_value)
up_list.append(up_value)
self.sys_dic_whole[key][nuisance] = (down_list, up_list)
#print self.sys_dic_whole
def make_sys(self, mass):
out_text = ""
for key, np_dic in self.sys_dic_whole.iteritems():
out_text += "[{}]\n".format(key)
for nuisance in np_dic.keys():
down_list, up_list = np_dic[nuisance]
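                # linearly interpolate the down/up variations between the
                # simulated mass points for the requested mass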
down = np.interp(mass, self.masses, down_list)
up = np.interp(mass, self.masses, up_list)
out_text += "{} = {:.4f} {:.4f}\n".format(nuisance, down, up)
## make a plot
#plt.plot(self.masses, down_list, 'o')
#int_list = np.linspace(300, 400, 10)
#down_int = np.interp(int_list, self.masses, down_list)
#plt.plot(int_list, down_int, '-x')
##plt.show()
#plt.savefig('intepolate_{}_{}_{}.png'.format(self.prod, key, nuisance))
#plt.close()
with open(os.path.join(self.input_dir, self.get_file_name(mass)), 'w') as f:
f.write(out_text)
if __name__ == "__main__":
sys_int = interpolate('/afs/cern.ch/user/x/xju/work/h4l/highmass/workspaces/LLVV/NWA_sameBinning/inputs', 'ggH')
sys_int.run()
| mit |
ominux/scikit-learn | sklearn/utils/__init__.py | 1 | 13319 | import numpy as np
import scipy.sparse as sp
import warnings
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
# First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to
    # prevent false positives from overflow in the sum method.
if X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum()) \
and not np.isfinite(X).all():
raise ValueError("array contains NaN or infinity")
def safe_asanyarray(X, dtype=None, order=None):
if not sp.issparse(X):
X = np.asanyarray(X, dtype, order)
assert_all_finite(X)
return X
def as_float_array(X, overwrite_X=False):
"""Converts a numpy array to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument overwrite_X.
WARNING : If X is not of type float, then a copy of X with the right type
will be returned
Parameters
----------
X : array
overwrite_X : bool, optional
if False, a copy of X will be created
Returns
-------
X : array
An array of type np.float
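    Examples
    --------
    A small illustration of the dtype conversion:
    >>> import numpy as np
    >>> from sklearn.utils import as_float_array
    >>> as_float_array(np.array([[1, 2], [3, 4]], dtype=np.int32)).dtype
    dtype('float32')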
"""
if X.dtype in [np.float32, np.float64]:
if overwrite_X:
return X
else:
return X.copy()
if X.dtype == np.int32:
X = X.astype(np.float32)
else:
X = X.astype(np.float64)
return X
def atleast2d_or_csr(X):
"""Like numpy.atleast_2d, but converts sparse matrices to CSR format"""
X = X.tocsr() if sp.issparse(X) else np.atleast_2d(X)
assert_all_finite(X)
return X
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, int):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def check_arrays(*arrays, **options):
"""Checked that all arrays have consistent first dimensions
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
sparse_format : 'csr' or 'csc', None by default
If not None, any scipy.sparse matrix is converted to
Compressed Sparse Rows or Compressed Sparse Columns representations.
copy : boolean, False by default
If copy is True, ensure that returned arrays are copies of the original
(if not already converted to another format earlier in the process).
"""
sparse_format = options.pop('sparse_format', None)
if sparse_format not in (None, 'csr', 'csc'):
raise ValueError('Unexpected sparse format: %r' % sparse_format)
copy = options.pop('copy', False)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
if not hasattr(first, '__len__') and not hasattr(first, 'shape'):
raise ValueError("Expected python sequence or array, got %r" % first)
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
checked_arrays = []
for array in arrays:
array_orig = array
if array is None:
# special case: ignore optional y=None kwarg pattern
checked_arrays.append(array)
continue
if not hasattr(array, '__len__') and not hasattr(array, 'shape'):
raise ValueError("Expected python sequence or array, got %r"
% array)
size = array.shape[0] if hasattr(array, 'shape') else len(array)
if size != n_samples:
raise ValueError("Found array with dim %d. Expected %d" % (
size, n_samples))
if sp.issparse(array):
if sparse_format == 'csr':
array = array.tocsr()
elif sparse_format == 'csc':
array = array.tocsc()
else:
array = np.asanyarray(array)
if copy and array is array_orig:
array = array.copy()
checked_arrays.append(array)
return checked_arrays
def warn_if_not_float(X, estimator='This algorithm'):
"""Warning utility function to check that data type is floating point"""
if not isinstance(estimator, basestring):
estimator = estimator.__class__.__name__
if X.dtype.kind != 'f':
warnings.warn("%s assumes floating point values as input, "
"got %s" % (estimator, X.dtype))
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Deprecating a class takes some work, since we want to run on Python
versions that do not have class decorators:
>>> class Foo(object): pass
...
>>> Foo = deprecated("Use Bar instead")(Foo)
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
what = "Function %s" % fun.__name__
msg = "%s is deprecated" % what
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
    Returns
    -------
Sequence of resampled views of the collections. The original arrays are
not impacted.
    Examples
    --------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<type 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:class:`sklearn.cross_validation.Bootstrap`
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
# To cope with Python 2.5 syntax limitations
kwargs = dict(sparse_format='csr')
arrays = check_arrays(*arrays, **kwargs)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
resampled_arrays = []
for array in arrays:
array = array[indices]
resampled_arrays.append(array)
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to resample(*arrays, replace=False) to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
    Returns
    -------
Sequence of shuffled views of the collections. The original arrays are
not impacted.
    Examples
    --------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<type 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def gen_even_slices(n, n_packs):
"""Generator to create n_packs slices going up to n.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
yield slice(start, end, None)
start = end
| bsd-3-clause |
xzh86/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
| bsd-3-clause |
aabadie/scikit-learn | sklearn/svm/classes.py | 22 | 41116 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while ``"crammer_singer"``
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
``[x, self.intercept_scaling]``,
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
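    Examples
    --------
    A minimal usage sketch; ``make_classification`` is used here only to build
    a small toy dataset:
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_features=4, random_state=0)
    >>> clf = LinearSVC(random_state=0)
    >>> clf.fit(X, y)  # doctest: +ELLIPSIS
    LinearSVC(...)
    >>> print(clf.coef_.shape)
    (1, 4)
    >>> print(clf.intercept_.shape)
    (1,)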
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
sample_weight : array-like, shape = [n_samples], optional
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
if self.loss in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(self.loss)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss, sample_weight=sample_weight)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the
        epsilon-insensitive loss (standard SVR) while
        'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
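    Examples
    --------
    A minimal usage sketch; ``make_regression`` is used here only to build a
    small toy dataset:
    >>> from sklearn.svm import LinearSVR
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, random_state=0)
    >>> reg = LinearSVR(random_state=0)
    >>> reg.fit(X, y)  # doctest: +ELLIPSIS
    LinearSVR(...)
    >>> print(reg.coef_.shape)
    (4,)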
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
sample_weight : array-like, shape = [n_samples], optional
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
if self.loss in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(self.loss)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon, sample_weight=sample_weight)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.19.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies as
``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.19.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
sample_weight : array-like, shape = [n_samples]
Individual weights for each sample
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)),
sample_weight=sample_weight, **params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
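# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): a minimal
# outlier-detection run with the OneClassSVM class defined above. The data
# and parameter values are made up purely for demonstration.
if __name__ == "__main__":  # pragma: no cover
    rng = np.random.RandomState(0)
    X_train = 0.3 * rng.randn(100, 2)                    # inliers near the origin
    X_far = rng.uniform(low=-4., high=4., size=(20, 2))  # mostly outliers
    demo_clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1)
    demo_clf.fit(X_train)
    # decision_function is positive inside the learned boundary, negative outside
    print("mean score, training data: %.3f"
          % demo_clf.decision_function(X_train).mean())
    print("mean score, far-away data: %.3f"
          % demo_clf.decision_function(X_far).mean())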
| bsd-3-clause |
bastorer/SPINSpy | Demo/make_movies.py | 1 | 4177 | import matplotlib
matplotlib.use('Agg')
import matpy as mp
import numpy as np
import spinspy as spy
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import sys, os, shutil, tempfile
import subprocess
from mpl_toolkits.axes_grid1 import make_axes_locatable
try: # Try using mpi
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
num_procs = comm.Get_size()
except:
rank = 0
num_procs = 1
## USER CHANGE THIS SECTION
out_direct = os.getcwd() # Where to put the movies
# the directory needs to already exist!
the_name = '2D_Turbulence' # What to call the movies
# the variable name will be appended
out_suffix = 'mp4' # Movie type
mov_fps = 15 # Framerate for movie
cmap = 'darkjet'
# Load some information
dat = spy.get_shape()
x,y = spy.grid()
##
## USER SHOULDN'T NEED TO CHANGE ANYTHING AFTER THIS
# Prepare directories
if rank == 0:
print('Video files will be saved in {0:s}'.format(out_direct))
tmp_dir = tempfile.mkdtemp(dir=out_direct)
fig_prefix = tmp_dir + '/' + the_name # path for saving frames
out_prefix = out_direct + '/' + the_name # path for saving videos
for proc in range(1,num_procs):
comm.send(tmp_dir,dest=proc,tag=1)
comm.send(fig_prefix,dest=proc,tag=2)
comm.send(out_prefix,dest=proc,tag=3)
else:
tmp_dir = comm.recv(source=0,tag=1)
fig_prefix = comm.recv(source=0,tag=2)
out_prefix = comm.recv(source=0,tag=3)
# Initialize the three meshes
fig = plt.figure()
plt.title('q')
sp1 = plt.subplot(1,1,1)
# Initial pcolormesh objects
QM1 = plt.pcolormesh(x/1e3,y/1e3,np.zeros((dat.Ny,dat.Nx)),cmap=cmap)
plt.axis('tight')
# Initialize colorbars
div1 = make_axes_locatable(sp1)
cax1 = div1.append_axes("bottom", size="5%", pad=0.25)
cbar1 = plt.colorbar(QM1, cax=cax1, format="%.2g", orientation='horizontal')
cax1.yaxis.set_visible(False)
cax1.set_xticklabels(cax1.get_xticklabels(),rotation=70,ha='center')
# Start at var0 and keep going until we run out.
ii = rank # parallel, so start where necessary
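# (illustrative note) With num_procs processes this loop assigns frames
# round-robin: process r renders frames r, r + num_procs, r + 2*num_procs, ...
# until spy.reader() raises for a missing output, which ends the loop.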
cont = True
while cont:
try:
q_ii = spy.reader('q',ii,[0,-1],[0,-1],ordering='matlab')
if ii % 1 == 0:
print('Processor {0:d} accessing {1:d}'.format(rank,ii))
# Now trim off the last row and column of q_ii.
# Why? pcolormesh drops them anyway (unless the coordinate arrays are
# one larger in each direction, which, for SPINS output, they are not).
# Normally this wouldn't be a problem, but here we are updating an
# existing plot, a process that requires ravel, so it gets confused
# about what it needs to trim; we therefore pre-trim for it.
q_ii = q_ii[:-1,:-1]
QM1.set_array(np.ravel(q_ii))
cv_xy = np.max(np.abs(np.ravel(q_ii)))
if cv_xy == 0:
cv_xy = 1
QM1.set_clim((-cv_xy,cv_xy))
QM1.changed()
fig.suptitle('ii = {0:04d}'.format(ii))
plt.draw()
fig.savefig('{0:s}-{1:05d}.png'.format(fig_prefix,ii))
ii += num_procs # Parallel, so skip a `bunch'
except:
cont = False
var_figs = '{0:s}-%05d.png'.format(fig_prefix)
# Have processor 0 wait for the others
if num_procs > 1:
if rank > 0:
isdone = True
comm.send(isdone, dest=0, tag=rank)
print('Processor {0:d} done.'.format(rank))
elif rank == 0:
isdone = False
for proc in range(1,num_procs):
isdone = comm.recv(source=proc,tag=proc)
# Now that the individual files have been written, we need to parse them into a movie.
if rank == 0:
in_name = var_figs
out_name = '{0:s}.{1:s}'.format(out_prefix,out_suffix)
cmd = ['ffmpeg', '-framerate', str(mov_fps), '-r', str(mov_fps),
'-i', in_name, '-y', '-q', '1', '-pix_fmt', 'yuv420p', out_name]
subprocess.call(cmd)
print('--------')
print('Deleting directory of intermediate frames.')
shutil.rmtree(tmp_dir)
print('Video creation complete.')
print('Processor {0:d} done.'.format(rank))
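# For reference, with the defaults above the subprocess call corresponds to a
# shell command of roughly this form (paths are illustrative):
#   ffmpeg -framerate 15 -r 15 -i <tmp_dir>/2D_Turbulence-%05d.png \
#          -y -q 1 -pix_fmt yuv420p <out_direct>/2D_Turbulence.mp4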
| mit |
fabioticconi/scikit-learn | examples/text/document_clustering.py | 42 | 8335 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted
Rand Index, are information-theoretic evaluation scores: as they are based
only on cluster assignments rather than distances, they are not affected by
the curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
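# ---------------------------------------------------------------------------
# Illustrative extension (not part of the original example): assign a new,
# made-up document to one of the learned clusters. When LSA is enabled, the
# fitted `lsa` pipeline must be applied before prediction.
new_doc = ["images of the space shuttle rendered with graphics software"]
new_X = vectorizer.transform(new_doc)
if opts.n_components:
    new_X = lsa.transform(new_X)
print("Predicted cluster for the new document: %d" % km.predict(new_X)[0])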
| bsd-3-clause |
mrshu/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 1 | 7103 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d.
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print __doc__
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD
import pylab as pl
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
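# Illustrative sanity check (hypothetical values): for an orthogonal design the
# irrelevant columns have zero projection on the relevant ones, so
# mutual_incoherence(np.eye(4)[:, :2], np.eye(4)[:, 2:]) evaluates to 0.0;
# values close to or above 1 indicate designs where the L1-recovery
# conditions break down.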
for conditionning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditionning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
pl.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = pl.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = pl.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = pl.ylim()
pl.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
pl.ylabel('Stability score: proportion of times selected')
pl.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
pl.axis('tight')
pl.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100, compute_importances=True).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
pl.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
pl.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
pl.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
pl.xlabel("Features")
pl.ylabel("Score")
# Plot only the 100 first coefficients
pl.xlim(0, 100)
pl.legend(loc='best')
pl.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
pl.show()
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation should be done on a copy. Leave the default
value as True unless you do not care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximize
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French, but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
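# ---------------------------------------------------------------------------
# Illustrative note (not part of the upstream module): the canonical scores
# returned by transform() are maximally correlated per component. Using the X
# and Y from the Examples section above, this can be checked with, e.g.:
#     import numpy as np
#     X_c, Y_c = CCA(n_components=1).fit(X, Y).transform(X, Y)
#     np.corrcoef(X_c[:, 0], Y_c[:, 0])[0, 1]   # close to 1 for these data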
| bsd-3-clause |
sinharrajesh/dbtools | twitter-analysis/appendpanda.py | 1 | 1148 | #!/usr/bin/python
# appendpandas allusers.csv alltweets.csv useranalysis (opfile)
import json
import logging
import sys
import csv
import pandas as pd
if __name__ == '__main__':
_loggingLevel = logging.INFO ## How much trace
logger = logging.getLogger(__name__)
logging.basicConfig(level=_loggingLevel)
names=['handle','#tweets','#followers','#friends','Joining Date']
userdf = pd.read_csv(sys.argv[1], header=None, delimiter='$', names = names)
t_names=['tweetType','handle']
tweetdf = pd.read_csv(sys.argv[2], header=None, delimiter='$', names=t_names, usecols=[1,4])
x = pd.pivot_table(tweetdf, index=['handle'], columns=['tweetType'], aggfunc=len, fill_value=0)
x = x.reset_index()
summary = pd.merge(x, userdf, how='left', left_on=['handle'], right_on=['handle'])
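# (illustrative note) `x` holds one row per handle with per-tweetType counts
# (Reply, Retweet, Tweet); the left merge attaches each handle's profile stats
# from the users file, keeping every handle that appears in the tweet pivot.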
summary['Total'] = summary['Reply'] + summary['Tweet'] + summary['Retweet']
summary['n-cubit related %age'] = summary['Total']/summary['#tweets']
summary.reindex(columns=['handle','Tweet','Retweet','Reply','Total','#tweets','#followers','#friends','Joining Date','n-cubit related %age']).to_csv(sys.argv[3], index=False)
| apache-2.0 |
imaculate/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
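# ---------------------------------------------------------------------------
# Illustrative addition (not in the original example): report the first
# canonical correlation of the CCA scores on train and test data.
print("CCA comp. 1, train corr: %.2f"
      % np.corrcoef(X_train_r[:, 0], Y_train_r[:, 0])[0, 1])
print("CCA comp. 1, test corr: %.2f"
      % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])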
| bsd-3-clause |
neilhan/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 11 | 29369 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
"""Takes a list of file patterns and returns a list of resolved file names."""
if not isinstance(filepatterns, (list, tuple, set)):
filepatterns = [filepatterns]
filenames = set()
for filepattern in filepatterns:
names = set(gfile.Glob(filepattern))
filenames |= names
return list(filenames)
def _dtype_to_nan(dtype):
if dtype is dtypes.string:
return b""
elif dtype.is_integer:
return np.nan
elif dtype.is_floating:
return np.nan
elif dtype is dtypes.bool:
return np.nan
else:
raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
dtype)
def _get_default_value(feature_spec):
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
return feature_spec.default_value
else:
return _dtype_to_nan(feature_spec.dtype)
class TensorFlowDataFrame(df.DataFrame):
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
def run(self,
num_batches=None,
graph=None,
session=None,
start_queues=True,
initialize_variables=True,
**kwargs):
"""Builds and runs the columns of the `DataFrame` and yields batches.
This is a generator that yields a dictionary mapping column names to
evaluated columns.
Args:
num_batches: the maximum number of batches to produce. If none is
specified, the returned value will iterate through batches indefinitely.
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
after producing `n` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.
Yields:
A dictionary, mapping column names to the values resulting from running
each column for a single batch.
"""
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
if session is None:
session = sess.Session()
self_built = self.build(**kwargs)
keys = list(self_built.keys())
cols = list(self_built.values())
if initialize_variables:
if variables.local_variables():
session.run(variables.initialize_local_variables())
if variables.all_variables():
session.run(variables.initialize_all_variables())
if start_queues:
coord = coordinator.Coordinator()
threads = qr.start_queue_runners(sess=session, coord=coord)
i = 0
while num_batches is None or i < num_batches:
i += 1
try:
values = session.run(cols)
yield collections.OrderedDict(zip(keys, values))
except errors.OutOfRangeError:
break
if start_queues:
coord.request_stop()
coord.join(threads)
def select_rows(self, boolean_series):
"""Returns a `DataFrame` with only the rows indicated by `boolean_series`.
Note that batches may no longer have consistent size after calling
`select_rows`, so the new `DataFrame` may need to be rebatched.
For example:
'''
filtered_df = df.select_rows(df["country"] == "jp").batch(64)
'''
Args:
boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
rows where `boolean_series` evaluated to `True`.
"""
result = type(self)()
for key, col in self._columns.items():
try:
result[key] = col.select_rows(boolean_series)
except AttributeError as e:
raise NotImplementedError((
"The select_rows method is not implemented for Series type {}. "
"Original error: {}").format(type(col), e))
return result
def split(self, index_series, proportion, batch_size=None):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
left_rows = self.select_rows(left_mask)
right_rows = self.select_rows(right_mask)
if batch_size:
left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
return left_rows, right_rows
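# Illustrative usage (hypothetical column name "example_id"): the call
#   train_df, eval_df = df.split("example_id", 0.8, batch_size=64)
# yields an (approximately) 80/20 partition that is stable across runs of the
# same binary, because assignment is keyed on the hash of the id strings.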
def split_fast(self, index_series, proportion, batch_size,
base_batch_size=1000):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
base_batch_size: the batch size to use for materialized data, prior to the
split.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
self["left_mask__"] = left_mask
self["right_mask__"] = right_mask
# TODO(soergel): instead of base_batch_size can we just do one big batch?
# avoid computing the hashes twice
m = self.materialize_to_memory(batch_size=base_batch_size)
left_rows_df = m.select_rows(m["left_mask__"])
right_rows_df = m.select_rows(m["right_mask__"])
del left_rows_df[["left_mask__", "right_mask__"]]
del right_rows_df[["left_mask__", "right_mask__"]]
# avoid recomputing the split repeatedly
left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size)
right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size)
return left_rows_df, right_rows_df
def run_one_batch(self):
"""Creates a new 'Graph` and `Session` and runs a single batch.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
batch of the `DataFrame`.
"""
return list(self.run(num_batches=1))[0]
def run_one_epoch(self):
"""Creates a new 'Graph` and `Session` and runs a single epoch.
Naturally this makes sense only for DataFrames that fit in memory.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
epoch of the `DataFrame`.
"""
# batches is a list of dicts of numpy arrays
batches = [b for b in self.run(num_epochs=1)]
# first invert that to make a dict of lists of numpy arrays
pivoted_batches = {}
for k in batches[0].keys():
pivoted_batches[k] = []
for b in batches:
for k, v in b.items():
pivoted_batches[k].append(v)
# then concat the arrays in each column
result = {k: np.concatenate(column_batches)
for k, column_batches in pivoted_batches.items()}
return result
def materialize_to_memory(self, batch_size):
unordered_dict_of_arrays = self.run_one_epoch()
# there may already be an 'index' column, in which case from_ordereddict()
# below will complain because it wants to generate a new one.
# for now, just remove it.
# TODO(soergel): preserve index history, potentially many levels deep
del unordered_dict_of_arrays["index"]
# the order of the columns in this dict is arbitrary; we just need it to
# remain consistent.
ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays)
return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays,
batch_size=batch_size)
def batch(self,
batch_size,
shuffle=False,
num_threads=1,
queue_capacity=None,
min_after_dequeue=None,
seed=None):
"""Resize the batches in the `DataFrame` to the given `batch_size`.
Args:
batch_size: desired batch size.
shuffle: whether records should be shuffled. Defaults to false.
num_threads: the number of enqueueing threads.
queue_capacity: capacity of the queue that will hold new batches.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` with `batch_size` rows.
"""
column_names = list(self._columns.keys())
if shuffle:
batcher = batch.ShuffleBatch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
seed=seed)
else:
batcher = batch.Batch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity)
batched_series = batcher(list(self._columns.values()))
dataframe = type(self)()
dataframe.assign(**(dict(zip(column_names, batched_series))))
return dataframe
@classmethod
def _from_csv_base(cls, filepatterns, get_default_values, has_header,
column_names, num_threads, enqueue_size,
batch_size, queue_capacity, min_after_dequeue, shuffle,
seed):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
get_default_values: a function that produces a list of default values for
each column, given the column names.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if column_names is None:
if not has_header:
raise ValueError("If column_names is None, has_header must be true.")
with gfile.GFile(filenames[0]) as f:
column_names = csv.DictReader(f).fieldnames
if "index" in column_names:
raise ValueError(
"'index' is reserved and can not be used for a column name.")
default_values = get_default_values(column_names)
reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
index, value = reader_source.TextFileSource(
filenames,
reader_kwargs=reader_kwargs,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = csv_parser.CSVParser(column_names, default_values)
parsed = parser(value)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_csv(cls,
filepatterns,
default_values,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
default_values: a list of default values for each column.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
# pylint: disable=unused-argument
return default_values
return cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
@classmethod
def from_csv_with_feature_spec(cls,
filepatterns,
feature_spec,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files, given a feature_spec.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
feature_spec: a dict mapping column names to `FixedLenFeature` or
`VarLenFeature`.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
return [_get_default_value(feature_spec[name]) for name in column_names]
dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
# replace the dense columns with sparse ones in place in the dataframe
for name in dataframe.columns():
if name != "index" and isinstance(feature_spec[name],
parsing_ops.VarLenFeature):
strip_value = _get_default_value(feature_spec[name])
(dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])
return dataframe
@classmethod
def from_examples(cls,
filepatterns,
features,
reader_cls=io_ops.TFRecordReader,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from `tensorflow.Example`s.
Args:
filepatterns: a list of file patterns containing `tensorflow.Example`s.
features: a dict mapping feature names to `VarLenFeature` or
`FixedLenFeature`.
reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
read the `Example`s.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with `Example`s from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if "index" in features:
raise ValueError(
"'index' is reserved and can not be used for a feature name.")
index, record = reader_source.ReaderSource(
reader_cls,
filenames,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = example_parser.ExampleParser(features)
parsed = parser(record)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_pandas(cls,
pandas_dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="pandas_data"):
"""Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.
Args:
pandas_dataframe: `pandas.DataFrame` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
`pandas_dataframe`.
"""
pandas_source = in_memory_source.PandasSource(
pandas_dataframe,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(pandas_source()._asdict()))
return dataframe
@classmethod
def from_numpy(cls,
numpy_array,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.
The returned `DataFrame` contains two columns: 'index' and 'value'. The
'value' column contains a row from the array. The 'index' column contains
the corresponding row number.
Args:
numpy_array: `numpy.ndarray` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
array.
"""
numpy_source = in_memory_source.NumpySource(
numpy_array,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
@classmethod
def from_ordereddict(cls,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`.
The returned `DataFrame` contains a column for each key of the dict plus an
extra 'index' column. The 'index' column contains the row number. Each of
the other columns contains a row from the corresponding array.
Args:
ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a
data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given arrays.
Raises:
ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'.
"""
numpy_source = in_memory_source.OrderedDictNumpySource(
ordered_dict_of_arrays,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
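# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): building a
# small in-memory DataFrame and pulling a single batch from it. Shapes and
# names are made up for demonstration.
#   import numpy as np
#   arr = np.arange(10, dtype=np.int32)
#   tf_df = TensorFlowDataFrame.from_numpy(arr, batch_size=4, shuffle=False)
#   batch_dict = tf_df.run_one_batch()   # dict with 'index' and 'value' keys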
| apache-2.0 |
Mistobaan/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops_test.py | 41 | 20535 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for triplet_semihard_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.losses.python import metric_learning as metric_loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance_np(feature, squared=False):
"""Computes the pairwise distance matrix in numpy.
Args:
feature: 2-D numpy array of size [number of data, feature dimension]
squared: Boolean. If true, output is the pairwise squared euclidean
distance matrix; else, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: 2-D numpy array of size
[number of data, number of data].
"""
triu = np.triu_indices(feature.shape[0], 1)
upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
if squared:
upper_tri_pdists **= 2.
num_data = feature.shape[0]
pairwise_distances = np.zeros((num_data, num_data))
pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
# Make symmetrical.
pairwise_distances = pairwise_distances + pairwise_distances.T - np.diag(
pairwise_distances.diagonal())
return pairwise_distances
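# Illustrative sanity check (comment-only sketch, not executed by the tests):
# for f = np.array([[0., 0.], [3., 4.]]), pairwise_distance_np(f) is
# [[0., 5.], [5., 0.]] and pairwise_distance_np(f, squared=True) is
# [[0., 25.], [25., 0.]], i.e. the elementwise square of the former.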
class ContrastiveLossTest(test.TestCase):
def testContrastive(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, size=(num_data,)).astype(np.float32)
# Compute the loss in NP
dist = np.sqrt(
np.sum(np.square(embeddings_anchor - embeddings_positive), axis=1))
loss_np = np.mean(
labels * np.square(dist) +
(1.0 - labels) * np.square(np.maximum(margin - dist, 0.0)))
# Compute the loss with TF
loss_tf = metric_loss_ops.contrastive_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class TripletSemiHardLossTest(test.TestCase):
def testTripletSemiHard(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP.
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding, squared=True)
loss_np = 0.0
num_positives = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
num_positives += 1.0
pos_distance = pdist_matrix[i][j]
neg_distances = []
for k in range(num_data):
if adjacency[i][k] == 0:
neg_distances.append(pdist_matrix[i][k])
# Sort by distance.
neg_distances.sort()
chosen_neg_distance = neg_distances[0]
for l in range(len(neg_distances)):
chosen_neg_distance = neg_distances[l]
if chosen_neg_distance > pos_distance:
break
loss_np += np.maximum(
0.0, margin - chosen_neg_distance + pos_distance)
loss_np /= num_positives
# Compute the loss in TF.
loss_tf = metric_loss_ops.triplet_semihard_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class LiftedStructLossTest(test.TestCase):
def testLiftedStruct(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding)
loss_np = 0.0
num_constraints = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
d_pos = pdist_matrix[i][j]
negs = []
for k in range(num_data):
if not adjacency[i][k]:
negs.append(margin - pdist_matrix[i][k])
for l in range(num_data):
if not adjacency[j][l]:
negs.append(margin - pdist_matrix[j][l])
negs = np.array(negs)
max_elem = np.max(negs)
negs -= max_elem
negs = np.exp(negs)
soft_maximum = np.log(np.sum(negs)) + max_elem
num_constraints += 1.0
this_loss = max(soft_maximum + d_pos, 0)
loss_np += this_loss * this_loss
loss_np = loss_np / num_constraints / 2.0
# Compute the loss in TF
loss_tf = metric_loss_ops.lifted_struct_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
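# Helper: converts each row of a {0, 1} indicator matrix into a 1-D
# SparseTensor holding the indices of its non-zero columns; used by the
# multilabel npairs tests below.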
def convert_to_list_of_sparse_tensor(np_matrix):
list_of_sparse_tensors = []
nrows, ncols = np_matrix.shape
for i in range(nrows):
sp_indices = []
for j in range(ncols):
if np_matrix[i][j] == 1:
sp_indices.append([j])
num_non_zeros = len(sp_indices)
list_of_sparse_tensors.append(sparse_tensor.SparseTensor(
indices=np.array(sp_indices),
values=np.ones((num_non_zeros,)),
dense_shape=np.array([ncols,])))
return list_of_sparse_tensors
class NpairsLossTest(test.TestCase):
def testNpairs(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 5
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.equal(
labels_reshaped, labels_reshaped.T).astype(np.float32)
labels_remapped /= np.sum(labels_remapped, axis=1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class NpairsLossMultiLabelTest(test.TestCase):
def testNpairsMultiLabelLossWithSingleLabelEqualsNpairsLoss(self):
with self.test_session():
num_data = 15
feat_dim = 6
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.arange(num_data)
labels = np.reshape(labels, -1)
      # Compute vanilla npairs loss.
loss_npairs = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
# Compute npairs multilabel loss.
labels_one_hot = np.identity(num_data)
loss_npairs_multilabel = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels_one_hot),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
self.assertAllClose(loss_npairs, loss_npairs_multilabel)
def testNpairsMultiLabel(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 10
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, (num_data, num_classes))
# set entire column to one so that each row has at least one bit set.
labels[:, -1] = 1
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.dot(labels, labels.T).astype(np.float)
labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def compute_ground_truth_cluster_score(feat, y):
y_unique = np.unique(y)
score_gt_np = 0.0
for c in y_unique:
feat_subset = feat[y == c, :]
pdist_subset = pairwise_distance_np(feat_subset)
score_gt_np += -1.0 * np.min(np.sum(pdist_subset, axis=0))
score_gt_np = score_gt_np.astype(np.float32)
return score_gt_np
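# Reference structured clustering loss used by the tests below:
# max(0, F_aug(S_pred) - F(S_gt)), where F_aug is the loss-augmented facility
# score of the predicted medoid set and F(S_gt) is the ground-truth cluster
# score computed above.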
def compute_cluster_loss_numpy(feat,
y,
margin_multiplier=1.0,
enable_pam_finetuning=True):
if enable_pam_finetuning:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).pam_augmented_fit(feat, y,
margin_multiplier)
else:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).loss_augmented_fit(feat, y,
margin_multiplier)
score_augmented = facility.score_aug_
score_gt = compute_ground_truth_cluster_score(feat, y)
return np.maximum(np.float32(0.0), score_augmented - score_gt)
class ForwardGreedyFacility(object):
def __init__(self, n_clusters=8):
self.n_clusters = n_clusters
self.center_ics_ = None
def _check_init_args(self):
# Check n_clusters.
if (self.n_clusters is None or self.n_clusters <= 0 or
not isinstance(self.n_clusters, int)):
      raise ValueError('n_clusters has to be a positive integer.')
def loss_augmented_fit(self, feat, y, loss_mult):
"""Fit K-Medoids to the provided data."""
self._check_init_args()
# Check that the array is good and attempt to convert it to
# Numpy array if possible.
feat = self._check_array(feat)
# Apply distance metric to get the distance matrix.
pdists = pairwise_distance_np(feat)
num_data = feat.shape[0]
candidate_ids = list(range(num_data))
candidate_scores = np.zeros(num_data,)
subset = []
k = 0
while k < self.n_clusters:
candidate_scores = []
for i in candidate_ids:
# push i to subset.
subset.append(i)
marginal_cost = -1.0 * np.sum(np.min(pdists[:, subset], axis=1))
loss = 1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset))
candidate_scores.append(marginal_cost + loss_mult * loss)
# remove i from subset.
subset.pop()
# push i_star to subset.
i_star = candidate_ids[np.argmax(candidate_scores)]
subset.append(i_star)
# remove i_star from candidate indices.
candidate_ids.remove(i_star)
k += 1
# Expose labels_ which are the assignments of
# the training data to clusters.
self.labels_ = self._get_cluster_ics(pdists, subset)
# Expose cluster centers, i.e. medoids.
self.cluster_centers_ = feat.take(subset, axis=0)
# Expose indices of chosen cluster centers.
self.center_ics_ = subset
# Expose the score = -\sum_{i \in V} min_{j \in S} || x_i - x_j ||
self.score_ = np.float32(-1.0) * self._get_facility_distance(pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
# Expose the chosen cluster indices.
self.subset_ = subset
return self
def _augmented_update_medoid_ics_in_place(self, pdists, y_gt, cluster_ics,
medoid_ics, loss_mult):
for cluster_idx in range(self.n_clusters):
# y_pred = self._get_cluster_ics(D, medoid_ics)
# Don't prematurely do the assignment step.
# Do this after we've updated all cluster medoids.
y_pred = cluster_ics
if sum(y_pred == cluster_idx) == 0:
# Cluster is empty.
continue
curr_score = (
-1.0 * np.sum(
pdists[medoid_ics[cluster_idx], y_pred == cluster_idx]) +
loss_mult * (1.0 - metrics.normalized_mutual_info_score(
y_gt, y_pred)))
pdist_in = pdists[y_pred == cluster_idx, :]
pdist_in = pdist_in[:, y_pred == cluster_idx]
all_scores_fac = np.sum(-1.0 * pdist_in, axis=1)
all_scores_loss = []
for i in range(y_pred.size):
if y_pred[i] != cluster_idx:
continue
# remove this cluster's current centroid
medoid_ics_i = medoid_ics[:cluster_idx] + medoid_ics[cluster_idx + 1:]
# add this new candidate to the centroid list
medoid_ics_i += [i]
y_pred_i = self._get_cluster_ics(pdists, medoid_ics_i)
all_scores_loss.append(loss_mult * (
1.0 - metrics.normalized_mutual_info_score(y_gt, y_pred_i)))
all_scores = all_scores_fac + all_scores_loss
max_score_idx = np.argmax(all_scores)
max_score = all_scores[max_score_idx]
if max_score > curr_score:
medoid_ics[cluster_idx] = np.where(
y_pred == cluster_idx)[0][max_score_idx]
def pam_augmented_fit(self, feat, y, loss_mult):
pam_max_iter = 5
self._check_init_args()
feat = self._check_array(feat)
pdists = pairwise_distance_np(feat)
self.loss_augmented_fit(feat, y, loss_mult)
print('PAM -1 (before PAM): score: %f, score_aug: %f' % (
self.score_, self.score_aug_))
# Initialize from loss augmented facility location
subset = self.center_ics_
for iter_ in range(pam_max_iter):
# update the cluster assignment
cluster_ics = self._get_cluster_ics(pdists, subset)
# update the medoid for each clusters
self._augmented_update_medoid_ics_in_place(pdists, y, cluster_ics, subset,
loss_mult)
self.score_ = np.float32(-1.0) * self._get_facility_distance(
pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
print('PAM iter: %d, score: %f, score_aug: %f' % (iter_, self.score_,
self.score_aug_))
self.center_ics_ = subset
self.labels_ = cluster_ics
return self
def _check_array(self, feat):
# Check that the number of clusters is less than or equal to
# the number of samples
if self.n_clusters > feat.shape[0]:
      raise ValueError('The number of medoids ({}) must not be larger than '
                       'the number of samples ({}).'.format(
                           self.n_clusters, feat.shape[0]))
return feat
def _get_cluster_ics(self, pdists, subset):
"""Returns cluster indices for pdist and current medoid indices."""
# Assign data points to clusters based on
# which cluster assignment yields
    # the smallest distance.
cluster_ics = np.argmin(pdists[subset, :], axis=0)
return cluster_ics
def _get_facility_distance(self, pdists, subset):
return np.sum(np.min(pdists[subset, :], axis=0))
class ClusterLossTest(test.TestCase):
def _genClusters(self, n_samples, n_clusters):
blobs = datasets.make_blobs(
n_samples=n_samples, centers=n_clusters)
embedding, labels = blobs
embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0)
embedding = embedding.astype(np.float32)
return embedding, labels
def testClusteringLossPAMOff(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=False)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=False)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def testClusteringLossPAMOn(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=True)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=True)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
if __name__ == '__main__':
test.main()
| apache-2.0 |
GoogleCloudPlatform/training-data-analyst | blogs/textclassification/txtcls1/trainer/model.py | 2 | 7021 | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tensorflow as tf
import tensorflow.contrib.learn as tflearn
import tensorflow.contrib.layers as tflayers
from tensorflow.contrib.learn.python.learn import learn_runner
import tensorflow.contrib.metrics as metrics
from tensorflow.python.platform import gfile
from tensorflow.contrib import lookup
tf.logging.set_verbosity(tf.logging.INFO)
# variables set by init()
BUCKET = None
TRAIN_STEPS = 1000
WORD_VOCAB_FILE = None
N_WORDS = -1
# hardcoded into graph
BATCH_SIZE = 32
# describe your data
TARGETS = ['nytimes', 'github', 'techcrunch']
MAX_DOCUMENT_LENGTH = 20
CSV_COLUMNS = ['source', 'title']
LABEL_COLUMN = 'source'
DEFAULTS = [['null'], ['null']]
PADWORD = 'ZYXW'
def init(bucket, num_steps):
global BUCKET, TRAIN_STEPS, WORD_VOCAB_FILE, N_WORDS
BUCKET = bucket
TRAIN_STEPS = num_steps
WORD_VOCAB_FILE = 'gs://{}/txtcls1/vocab_words'.format(BUCKET)
N_WORDS = save_vocab('gs://{}/txtcls1/train.csv'.format(BUCKET), 'title', WORD_VOCAB_FILE);
def save_vocab(trainfile, txtcolname, outfilename):
if trainfile.startswith('gs://'):
import subprocess
tmpfile = "vocab.csv"
subprocess.check_call("gsutil cp {} {}".format(trainfile, tmpfile).split(" "))
filename = tmpfile
else:
filename = trainfile
import pandas as pd
df = pd.read_csv(filename, header=None, sep='\t', names=['source', 'title'])
# the text to be classified
vocab_processor = tflearn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH, min_frequency=10)
vocab_processor.fit(df[txtcolname])
with gfile.Open(outfilename, 'wb') as f:
f.write("{}\n".format(PADWORD))
for word, index in vocab_processor.vocabulary_._mapping.iteritems():
f.write("{}\n".format(word))
nwords = len(vocab_processor.vocabulary_)
print('{} words into {}'.format(nwords, outfilename))
return nwords + 2 # PADWORD and <UNK>
def read_dataset(prefix):
# use prefix to create filename
filename = 'gs://{}/txtcls1/{}*csv*'.format(BUCKET, prefix)
if prefix == 'train':
mode = tf.contrib.learn.ModeKeys.TRAIN
else:
mode = tf.contrib.learn.ModeKeys.EVAL
# the actual input function passed to TensorFlow
def _input_fn():
# could be a path to one file or a file pattern.
input_file_names = tf.train.match_filenames_once(filename)
filename_queue = tf.train.string_input_producer(input_file_names, shuffle=True)
# read CSV
reader = tf.TextLineReader()
_, value = reader.read_up_to(filename_queue, num_records=BATCH_SIZE)
#value = tf.train.shuffle_batch([value], BATCH_SIZE, capacity=10*BATCH_SIZE, min_after_dequeue=BATCH_SIZE, enqueue_many=True, allow_smaller_final_batch=False)
value_column = tf.expand_dims(value, -1)
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS, field_delim='\t')
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
# make targets numeric
table = tf.contrib.lookup.index_table_from_tensor(
mapping=tf.constant(TARGETS), num_oov_buckets=0, default_value=-1)
target = table.lookup(label)
return features, target
return _input_fn
# CNN model parameters
EMBEDDING_SIZE = 10
WINDOW_SIZE = EMBEDDING_SIZE
STRIDE = int(WINDOW_SIZE/2)
def cnn_model(features, target, mode):
table = lookup.index_table_from_file(vocabulary_file=WORD_VOCAB_FILE, num_oov_buckets=1, default_value=-1)
# string operations
titles = tf.squeeze(features['title'], [1])
words = tf.string_split(titles)
densewords = tf.sparse_tensor_to_dense(words, default_value=PADWORD)
numbers = table.lookup(densewords)
padding = tf.constant([[0,0],[0,MAX_DOCUMENT_LENGTH]])
padded = tf.pad(numbers, padding)
sliced = tf.slice(padded, [0,0], [-1, MAX_DOCUMENT_LENGTH])
print('words_sliced={}'.format(words)) # (?, 20)
# layer to take the words and convert them into vectors (embeddings)
embeds = tf.contrib.layers.embed_sequence(sliced, vocab_size=N_WORDS, embed_dim=EMBEDDING_SIZE)
print('words_embed={}'.format(embeds)) # (?, 20, 10)
# now do convolution
conv = tf.contrib.layers.conv2d(embeds, 1, WINDOW_SIZE, stride=STRIDE, padding='SAME') # (?, 4, 1)
conv = tf.nn.relu(conv) # (?, 4, 1)
words = tf.squeeze(conv, [2]) # (?, 4)
print('words_conv={}'.format(words)) # (?, 4)
n_classes = len(TARGETS)
logits = tf.contrib.layers.fully_connected(words, n_classes, activation_fn=None)
#print('logits={}'.format(logits)) # (?, 3)
predictions_dict = {
'source': tf.gather(TARGETS, tf.argmax(logits, 1)),
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
loss = tf.losses.sparse_softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
else:
loss = None
train_op = None
return tflearn.ModelFnOps(
mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op)
def serving_input_fn():
feature_placeholders = {
'title': tf.placeholder(tf.string, [None]),
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tflearn.utils.input_fn_utils.InputFnOps(
features,
None,
feature_placeholders)
def get_train():
return read_dataset('train')
def get_valid():
return read_dataset('eval')
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
def experiment_fn(output_dir):
# run experiment
return tflearn.Experiment(
tflearn.Estimator(model_fn=cnn_model, model_dir=output_dir),
train_input_fn=get_train(),
eval_input_fn=get_valid(),
eval_metrics={
'acc': tflearn.MetricSpec(
metric_fn=metrics.streaming_accuracy, prediction_key='class'
)
},
export_strategies=[saved_model_export_utils.make_export_strategy(
serving_input_fn,
default_output_alternative_key=None,
exports_to_keep=1
)],
train_steps = TRAIN_STEPS
)
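# Note: `learn_runner` (imported above) is the usual entry point for launching
# this experiment from a task script; a typical call would be roughly
# `learn_runner.run(experiment_fn, output_dir)` (assumed here, not shown in
# this module).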
| apache-2.0 |
sgenoud/scikit-learn | examples/svm/plot_svm_margin.py | 3 | 2291 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print __doc__
# Code source: Gael Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
    clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
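    # For the canonical SVM solution, |w . x + b| = 1 at the support vectors,
    # so the geometric margin is 1 / ||w||, computed below from clf.coef_.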
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
pl.figure(fignum, figsize=(4, 3))
pl.clf()
pl.plot(xx, yy, 'k-')
pl.plot(xx, yy_down, 'k--')
pl.plot(xx, yy_up, 'k--')
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none', zorder=10)
pl.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=pl.cm.Paired)
pl.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
pl.figure(fignum, figsize=(4, 3))
pl.pcolormesh(XX, YY, Z, cmap=pl.cm.Paired)
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
fignum = fignum + 1
pl.show()
| bsd-3-clause |
wllmtrng/ggplot | ggplot/tests/test_chart_components.py | 12 | 1664 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from nose.tools import assert_raises, assert_equal, assert_is_none
from ggplot import *
from ggplot.utils.exceptions import GgplotError
def test_chart_components():
"""
Test invalid arguments to chart components
"""
df = pd.DataFrame({'x': np.arange(10),
'y': np.arange(10)})
gg = ggplot(df, aes(x='x', y='y'))
# test ggtitle
assert_raises(GgplotError, ggtitle, None)
# test xlim
assert_raises(GgplotError, xlim, "foo", 1)
assert_raises(GgplotError, xlim, "foo", "bar")
# test ylim
assert_raises(GgplotError, ylim, "foo", 1)
assert_raises(GgplotError, ylim, "foo", "bar")
    # test xlab
    assert_raises(GgplotError, xlab, None)
# test ylab
assert_raises(GgplotError, ylab, None)
# test labs
test_xlab = 'xlab'
gg_xlab = gg + labs(x=test_xlab)
assert_equal(gg_xlab.xlab, test_xlab)
assert_is_none(gg_xlab.ylab)
assert_is_none(gg_xlab.title)
test_ylab = 'ylab'
gg_ylab = gg + labs(y=test_ylab)
assert_is_none(gg_ylab.xlab)
assert_equal(gg_ylab.ylab, test_ylab)
assert_is_none(gg_ylab.title)
test_title = 'title'
gg_title = gg + labs(title=test_title)
assert_is_none(gg_title.xlab)
assert_is_none(gg_title.ylab)
assert_equal(gg_title.title, test_title)
gg_labs = gg + labs(x=test_xlab, y=test_ylab, title=test_title)
assert_equal(gg_labs.xlab, test_xlab)
assert_equal(gg_labs.ylab, test_ylab)
assert_equal(gg_labs.title, test_title)
| bsd-2-clause |
davidenitti/ML | autoencoders/cnn_autoencoders.py | 1 | 23102 | '''
CNN autoencoder applied to faces (for example)
Author: Davide Nitti
'''
import torch
from torch import nn
from torchvision import transforms, datasets
import argparse
import torch.optim as optim
import matplotlib
import time
import json
import torch.nn.functional as F
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from core.modules import View, CropPad, Identity, Interpolate, ConvBlock, TanhMod, PixelNorm2d
from core.utils import get_lr, load_model, save_model, set_lr
try:
matplotlib.use("TkAgg")
except:
print('WARNING: TkAgg not loaded')
import matplotlib.pyplot as plt
import random, os
import numpy as np
from multiprocessing import Process
def start_process(func, args):
p = Process(target=func, args=args)
p.start()
return p
STD = 0.50
try:
import IPython.display # for colab compatibility
except:
plt.ion()
def mypause(interval):
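    # Redraws all open figures without stealing window focus (a common
    # workaround for plt.pause, which raises the figure window on every call).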
backend = plt.rcParams['backend']
if backend in matplotlib.rcsetup.interactive_bk:
figManager = matplotlib._pylab_helpers.Gcf.get_active()
if figManager is not None:
canvas = figManager.canvas
if canvas.figure.stale:
canvas.draw()
canvas.start_event_loop(interval)
return
class Encoder(nn.Module):
def __init__(self, net_params, size_input):
super(Encoder, self).__init__()
self.debug = False
if net_params['norm'] == 'PixelNorm2d':
self.norm = PixelNorm2d
elif net_params['norm'] == 'BatchNorm2d':
self.norm = nn.BatchNorm2d
elif net_params['norm'] == 'LocalResponseNorm':
self.norm = nn.LocalResponseNorm
elif net_params['norm'] == None:
self.norm = Identity
elif net_params['norm'] == 'InstanceNorm2d':
self.norm = nn.InstanceNorm2d
else:
raise NotImplementedError
self.base = net_params['base']
self.multiplier_chan = net_params['multiplier_chan']
self.non_lin = getattr(nn, net_params['non_linearity'])
self.base_enc = net_params['num_features_encoding']
self.upconv_chan = net_params['upconv_chan']
self.upconv_size = net_params['upconv_size']
max_hw = max(size_input[-2:])
if size_input[-1] != size_input[-2]:
self.pad_inp = CropPad(max_hw, max_hw)
self.crop_inp = CropPad(size_input[-2], size_input[-1])
else:
print('no pad/crop')
self.pad_inp = Identity()
self.crop_inp = Identity()
conv_block = ConvBlock
self.conv1 = conv_block(in_channels=3, out_channels=self.base, kernel_size=3, stride=1,
padding=1, nonlin=self.non_lin, batch_norm=self.norm, affine=net_params['affine'])
list_conv2 = []
chan = self.base
for i in range(6):
new_chan = min(net_params['max_chan'], int(chan * self.multiplier_chan))
list_conv2 += [conv_block(in_channels=chan, out_channels=chan, kernel_size=3, stride=1,
padding=1, nonlin=self.non_lin, batch_norm=self.norm),
conv_block(in_channels=chan, out_channels=chan, kernel_size=3, stride=1,
padding=1, nonlin=self.non_lin, batch_norm=self.norm),
conv_block(in_channels=chan, out_channels=new_chan, kernel_size=3, stride=2,
padding=1, nonlin=self.non_lin, batch_norm=self.norm)]
chan = new_chan
self.conv2 = nn.ModuleList(list_conv2)
pre_encoding_shape = self.encoding(torch.zeros(size_input)).shape
print('pre_encoding_shape', pre_encoding_shape)
self.conv_enc = nn.Sequential(
*[View([-1]),
nn.Linear(pre_encoding_shape[1] * pre_encoding_shape[2] * pre_encoding_shape[3], self.base_enc),
TanhMod(net_params['scale_tanh'])])
self.upconv1 = nn.ModuleList([
View([-1]), nn.Linear(self.base_enc, self.upconv_chan * self.upconv_size * self.upconv_size),
View([self.upconv_chan, self.upconv_size, self.upconv_size]),
self.non_lin(),
conv_block(in_channels=self.upconv_chan, out_channels=self.upconv_chan, kernel_size=3, stride=1,
padding=1, nonlin=self.non_lin, batch_norm=self.norm)])
list_upconv2 = []
chan = self.upconv_chan
new_chan = chan
for i in range(net_params['upscale_blocks']):
if i >= 3:
new_chan = int(chan // self.multiplier_chan)
list_upconv2 += [nn.Upsample(scale_factor=2, mode='nearest'),
conv_block(in_channels=chan, out_channels=new_chan, kernel_size=3,
stride=1, padding=1, nonlin=self.non_lin, batch_norm=self.norm),
conv_block(in_channels=new_chan, out_channels=new_chan, kernel_size=3,
stride=1, padding=1, nonlin=self.non_lin, batch_norm=self.norm)]
chan = new_chan
self.upconv2 = nn.ModuleList(list_upconv2)
self.upconv_rec = nn.ModuleList([
self.norm(chan),
nn.Conv2d(chan, 3, 3, 1, padding=1),
# nn.Tanh(),
Interpolate((max_hw, max_hw), mode='bilinear')])
def decoding(self, encoding):
debug = self.debug
x = encoding
for layer in self.upconv1:
x = layer(x)
if debug:
print(x.shape, layer)
for layer in self.upconv2:
x = layer(x)
if debug:
print(x.shape, layer)
for layer in self.upconv_rec:
x = layer(x)
if debug:
print(x.shape, layer)
x = self.crop_inp(x)
if debug:
print(x.shape)
if not self.training:
x = torch.clamp(x, -1, 1)
return x
def encoding(self, x):
debug = self.debug
if debug:
print(x.shape, 'encoding start')
x = self.pad_inp(x)
if debug:
print(x.shape, 'pad_inp')
x = self.conv1(x)
if debug:
print(x.shape)
for layer in self.conv2:
x = layer(x)
if debug:
print(x.shape, layer)
return x
def forward(self, x):
x = self.encoding(x)
encoding = self.conv_enc(x)
if self.debug:
print('encoding', encoding.shape)
x = self.decoding(encoding)
return encoding, x
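# renorm/renorm_batch undo the Normalize(mean=0.5, std=STD) preprocessing and
# move channels last, so tensors can be shown or saved as [0, 1] HWC images.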
def renorm(inp):
img = inp.permute(1, 2, 0)
return torch.clamp(img * STD + 0.5, 0, 1)
def renorm_batch(inp):
# print(inp.shape)
img = inp.permute(0, 2, 3, 1)
# print(img.shape)
return torch.clamp(img * STD + 0.5, 0, 1)
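# var_loss compares local (5x5) variance maps of the prediction and the ground
# truth, penalizing differences in local texture statistics; it is defined here
# but not currently added to the training loss in train().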
def var_loss(pred, gt):
var_pred = nn.functional.avg_pool2d((pred - nn.functional.avg_pool2d(pred, 5, stride=1, padding=2)) ** 2,
kernel_size=5, stride=1)
var_gt = nn.functional.avg_pool2d((gt - nn.functional.avg_pool2d(gt, 5, stride=1, padding=2)) ** 2, kernel_size=5,
stride=1)
loss = torch.mean((var_pred - var_gt) ** 2)
return loss
def train(args, model, device, train_loader, optimizer, epoch, upload_checkpoint, callback, process_upload, scheduler):
stats_enc = {'mean': 0, 'sum_var': 0, 'n': 0, 'min': torch.tensor(100000000.), 'max': torch.zeros(1)}
mean_image = 0.0
model.train()
total_loss = 0.
num_loss = 0
image_first_batch = None
if args.local:
fig, ax = plt.subplots(9, figsize=(18, 10))
num_baches = 0.0
total_time_batches = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
time.sleep(args.sleep)
start = time.time()
model.train()
data, target = data.to(device), target.to(device)
if image_first_batch is None:
image_first_batch = data
optimizer.zero_grad()
with torch.autograd.detect_anomaly():
encoding, output = model(data)
stats_enc['min'] = torch.min(stats_enc['min'], encoding.min().cpu().detach())
stats_enc['max'] = torch.max(stats_enc['max'], encoding.max().cpu().detach())
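            # Running per-feature mean/variance of the encodings via Welford's
            # online algorithm, plus a running mean of the input images.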
for b in range(encoding.shape[0]):
stats_enc['n'] += 1
mean_image += (data[b].cpu().detach() - mean_image) / stats_enc['n']
mean_old = stats_enc['mean']
stats_enc['mean'] += (encoding[b].cpu().detach() - stats_enc['mean']) / stats_enc['n']
stats_enc['sum_var'] += (encoding[b].cpu().detach() - mean_old) * (
encoding[b].cpu().detach() - stats_enc['mean'])
stats_enc['var'] = (stats_enc['sum_var'] / stats_enc['n'])
stats_enc['std'] = stats_enc['var'] ** 0.5
loss_encoding = 0.1 * args.net_params['reg'] * torch.mean(encoding ** 2)
loss_encoding += args.net_params['reg'] * torch.mean(encoding.mean(0) ** 2)
#loss_encoding += 0.1*args.net_params['reg'] * torch.mean((1.0 - (encoding).var(0)) ** 2)
if 'dist_reg' in args.net_params:
pass # loss_encoding -= args.net_params['dist_reg']*(encoding.view(encoding.shape[0],-1,1)-encoding.view(1,encoding.shape[0],-1))**2
loss_mse = torch.mean((data - output) ** 2)
# loss_aer = torch.mean(torch.abs(data - output))
loss = loss_mse + loss_encoding # + 0.01 * loss_aer
total_loss += loss.item()
num_loss += 1
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
torch.nn.utils.clip_grad_value_(model.parameters(), 0.5)
optimizer.step()
time_batch = time.time() - start
total_time_batches += time_batch
num_baches += 1
if batch_idx % args.log_interval == 0 or batch_idx == len(train_loader) - 1:
if batch_idx > 0 and batch_idx % (args.log_interval * 3) == 0 and upload_checkpoint:
if process_upload is not None:
process_upload.join()
save_model(args.checkpoint, epoch, model, optimizer, scheduler)
if callback is not None:
callback(False)
process_upload = start_process(callback, (True,))
if not args.local:
fig, ax = plt.subplots(9, figsize=(18, 10))
model.eval()
img1 = renorm(image_first_batch[0])
mean_image_norm = renorm(mean_image)
if epoch == 1:
matplotlib.image.imsave(os.path.join(args.res_dir, 'mean_image.png'),
mean_image_norm.cpu().detach().numpy(), vmin=0.0, vmax=1.0)
with torch.no_grad():
if batch_idx == 0:
model.debug = True
encod1, output1 = model(image_first_batch)
if batch_idx == 0:
model.debug = False
img1_recon = renorm(output1[0])
all_img = torch.cat((img1.cpu(), img1_recon.cpu()), 1).detach().numpy()
matplotlib.image.imsave(
os.path.join(args.res_dir, 'img1_reconstruction_epoch_{0:03d}.png'.format(epoch)), all_img,
vmin=0.0,
vmax=1.0)
img2 = renorm(data[0])
img_rec2 = renorm(output[0])
all_img2 = torch.cat((img2, img_rec2), 1).cpu().detach().numpy()
matplotlib.image.imsave(
os.path.join(args.res_dir, 'img2_reconstruction_epoch_{0:03d}.png'.format(epoch)), all_img2,
vmin=0.0,
vmax=1.0)
zero_enc = stats_enc['mean'].view(1, -1).cuda() #torch.zeros_like(encod1[:1])
rand_enc = torch.clamp(
torch.randn_like(encod1[:1]) * stats_enc['std'].cuda() + stats_enc['mean'].cuda(),
-args.net_params['scale_tanh'], args.net_params['scale_tanh'])
enc_to_show = torch.cat((zero_enc, rand_enc, encod1[:1]), 0)
rand_img = model.decoding(enc_to_show)
rand_img_list = []
width_img = rand_img.shape[2]
show_every = 3
for i in range(30 * show_every):
if i % show_every == 0:
rand_img_list.append(rand_img)
_, rand_img = model(rand_img)
rand_img = renorm_batch(torch.cat(rand_img_list, 3))
matplotlib.image.imsave(
os.path.join(args.res_dir, 'zero_encode_evolution_epoch_{0:03d}.png'.format(epoch)),
rand_img[0, :, :width_img * 5].cpu().detach().numpy(), vmin=0.0, vmax=1.0)
matplotlib.image.imsave(
os.path.join(args.res_dir, 'rand_encode_evolution_epoch_{0:03d}.png'.format(epoch)),
rand_img[1, :, :width_img * 5].cpu().detach().numpy(), vmin=0.0, vmax=1.0)
matplotlib.image.imsave(os.path.join(args.res_dir, 'encode_evolution_epoch_{0:03d}.png'.format(epoch)),
rand_img[2, :, :width_img * 5].cpu().detach().numpy(),
vmin=0.0, vmax=1.0)
rand_img = torch.cat([r for r in rand_img], 0).cpu().detach().numpy()
img_list = []
for i in range(6):
alpha = i / 5.0
blend_enc = alpha * encod1[:1] + (1 - alpha) * encoding[:1]
img_list.append(renorm(model.decoding(blend_enc)[0]))
all_img_blend = torch.cat(img_list, 1).cpu().detach().numpy()
plt.figure(1)
ax[0].imshow(np.hstack((all_img, all_img2)))
ax[1].imshow(all_img_blend)
matplotlib.image.imsave(os.path.join(args.res_dir, 'img1_to_img2_morph_epoch_{0:03d}.png'.format(epoch)),
all_img_blend, vmin=0.0,
vmax=1.0)
h = rand_img.shape[0]
for row in range(2):
ax[row + 2].imshow(rand_img[:h // 3, row * width_img * 15:(row + 1) * width_img * 15])
for row in range(2):
ax[row + 2 + 2].imshow(rand_img[h // 3:2 * h // 3, row * width_img * 15:(row + 1) * width_img * 15])
for row in range(2):
ax[row + 2 + 2 + 2].imshow(rand_img[2 * h // 3:, row * width_img * 15:(row + 1) * width_img * 15])
plt.tight_layout()
for a in ax:
a.axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
if args.local:
mypause(0.01)
else:
# clear_output()
plt.draw()
plt.pause(0.01)
print('data {:.3f} {:.3f} {:.3f}'.format(data.min().item(), data.max().item(), data.mean().item()))
print('output {:.3f} {:.3f} {:.3f}'.format(output.min().item(), output.max().item(), output.mean().item()))
# print(img1_recon.min().item(), img1_recon.max().item(), img1_recon.mean().item())
print('stats_enc')
for s in stats_enc:
if s not in ['sum_var', 'var']:
if isinstance(stats_enc[s], int):
print("{} = {}".format(s, stats_enc[s]))
else:
print("{} = {:.3f} {:.3f} {:.3f} shape {}".format(
s, stats_enc[s].min().item(), stats_enc[s].mean().item(), stats_enc[s].max().item(),
stats_enc[s].shape))
print('non_lin', model.non_lin)
print(
'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.4f} loss_mse: {:.4f} loss_enc {:.4f} time_batch {:.2f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), (total_loss / num_loss),
loss_mse, loss_encoding.item(), total_time_batches / num_baches))
model.train()
plt.close()
return process_upload
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def get_args(args_list=None):
parser = argparse.ArgumentParser(description='PyTorch Example')
parser.add_argument('--batch_size', type=int, default=26, metavar='N',
                        help='input batch size for training (default: 26)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
                        help='learning rate (default: 0.07 for SGD, 0.00075 for Adam)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--local', action='store_true', default=False,
help='local')
parser.add_argument('--seed', type=int, default=-1, metavar='S',
help='random seed (default: -1)')
parser.add_argument('--optimizer', default='adam',
help='optimizer')
parser.add_argument('--sleep', type=float, default=0.001,
help='sleep')
parser.add_argument('--decay_lr', type=float, default=0.995,
                        help='multiplicative learning rate decay applied each epoch')
parser.add_argument('--log_interval', type=int, default=200, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--checkpoint', default='tmp.pth',
help='checkpoint')
parser.add_argument('--save_raw', action='store_true', default=False,
help='For Saving the current Model (raw)')
parser.add_argument('--dataset', default='/home/davide/datasets/', help='dataset path '
'e.g. https://drive.google.com/open?id=0BxYys69jI14kYVM3aVhKS1VhRUk')
parser.add_argument('--res_dir', default='./', help='result dir')
parser.add_argument('--net_params', default={'non_linearity': "PReLU",
'norm': 'InstanceNorm2d',
'base': 32,
'num_features_encoding': 256,
'upconv_chan': 256,
'upconv_size': 4,
'multiplier_chan': 2,
'max_chan': 512,
'upscale_blocks': 6,
'reg': 0.05,
'dist_reg': 0.1,
'crop': -1,
'scale_tanh': 4,
'affine': True
}, type=dict, help='net_params')
args = parser.parse_args(args_list)
return args
def main(args, callback=None, upload_checkpoint=False):
print(vars(args))
print('upload_checkpoint', upload_checkpoint)
if not os.path.exists(args.res_dir):
os.makedirs(args.res_dir)
with open(os.path.join(args.res_dir, 'params.json'), 'w') as f:
json.dump(vars(args), f, indent=4)
if not os.path.exists(os.path.dirname(args.checkpoint)):
os.makedirs(os.path.dirname(args.checkpoint))
use_cuda = not args.no_cuda and torch.cuda.is_available()
if args.seed >= 0:
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
transform_list = [
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[STD, STD, STD])
]
if args.net_params['crop'] > 0:
transform_list = [transforms.CenterCrop(args.net_params['crop'])] + transform_list
data_transform = transforms.Compose(transform_list)
face_dataset_train = datasets.ImageFolder(root=args.dataset,
transform=data_transform)
# face_dataset_test = datasets.ImageFolder(root='test',
# transform=data_transform)
train_loader = torch.utils.data.DataLoader(face_dataset_train,
batch_size=args.batch_size, shuffle=True,
num_workers=0)
# test_loader = torch.utils.data.DataLoader(face_dataset_test,
# batch_size=args.test_batch_size, shuffle=True, **kwargs)
# args.checkpoint = "cnn3.pth"
model = Encoder(args.net_params, next(iter(train_loader))[0].shape).to(device)
if args.optimizer == 'sgd':
if args.lr is None:
args.lr = 0.07
optimizer = optim.SGD(model.parameters(), lr=args.lr, nesterov=True, momentum=0.8, weight_decay=0)
elif args.optimizer == 'adam':
if args.lr is None:
args.lr = 0.00075
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0)
else:
raise NotImplementedError
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=args.decay_lr)
if os.path.exists(args.checkpoint):
epoch_start = load_model(args.checkpoint, model, optimizer, scheduler) + 1
else:
epoch_start = 1
if False:
set_lr(optimizer, args.lr)
process_upload = None
for epoch in range(epoch_start, args.epochs + 1):
print('learning rate {:.5f}'.format(get_lr(optimizer)))
process_upload = train(args, model, device, train_loader, optimizer, epoch, upload_checkpoint, callback,
process_upload, scheduler)
if process_upload is not None:
process_upload.join()
save_model(args.checkpoint, epoch, model, optimizer, scheduler)
if callback is not None:
callback(False)
if upload_checkpoint:
process_upload = start_process(callback, (True,))
scheduler.step()
# no test at the moment
# test(args, model, device, test_loader)
if __name__ == '__main__':
base_dir_res = "/home/davide/results/cnn_autoencoders_local"
base_dir_dataset = '/home/davide/datasets/faces'
list_args = ['--sleep', '0.5', '--local', '--batch_size', '18', '--dataset', base_dir_dataset,
'--res_dir', base_dir_res,
'--checkpoint', os.path.join(base_dir_res, 'checkpoint.pth')]
args = get_args(list_args)
main(args)
| gpl-3.0 |