repo_name | path | copies | size | content | license
---|---|---|---|---|---|
nadavbar/BorderPeelingClustering | clustering_tools.py | 1 | 26005 | import itertools
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
from copy import deepcopy
from enum import Enum
from matplotlib import patches
from matplotlib.collections import PatchCollection
from os.path import join
from sklearn import cluster
from sklearn.cluster import AffinityPropagation, DBSCAN, MeanShift, estimate_bandwidth, SpectralClustering
from sklearn.neighbors import kneighbors_graph
from sklearn import metrics
from sklearn import preprocessing
# read arff file:
def read_arff(file_path):
read_data = False
data = []
labels = []
with open(file_path) as handle:
for l in handle:
l = l.rstrip()
if (read_data):
splitted = l.split(",")
row = [float(s) for s in splitted[:len(splitted)-1]]
data.append(row)
labels.append(splitted[len(splitted)-1])
elif (l.lower() == "@data"):
read_data = True
le = preprocessing.LabelEncoder()
encoded_labels = le.fit_transform(labels)
return np.ndarray(shape=(len(data), len(data[0])), buffer=np.matrix(data)), np.array(encoded_labels)
def save_data(file_path, data , labels):
with open(file_path, "w") as handle:
for p, l in zip(data, labels):
line = ",".join([str(s) for s in p]) + "," + str(l)
handle.write(line + "\n")
def load_from_file_or_data(obj, seperator=',', dim=2, hasLabels=False):
if (type(obj) is str):
return read_data(obj, seperator=seperator, has_labels=hasLabels)
else:
return obj
def add_random_noise(data, labels=None, noise_points_count=100):
# get bounding box of data:
dim = data.shape[1]
min_vals = np.zeros(dim)
max_vals = np.zeros(dim)
# initialize the boundaries with the value of the first row
for v,i in zip(data[0].A1,xrange(dim)):
min_vals[i] = v
max_vals[i] = v
for r in data:
for v,i in zip(r.A1,xrange(dim)):
if (v > max_vals[i]):
max_vals[i] = v
if (v < min_vals[i]):
min_vals[i] = v
# add random points:
noise_points = []
for i in xrange(dim):
noise_points.append(np.random.uniform(min_vals[i], max_vals[i], (noise_points_count,1)))
noise = np.concatenate(tuple(noise_points), axis=1)
noised_data = np.concatenate((data, noise))
noised_labels = np.concatenate((labels, -1*np.ones(noise_points_count)))
return noised_data, noised_labels
def draw_clusters(X, labels, colors=None, show_plt=True, show_title=False, name=None, ax=None,
markersize=15, markeredgecolor='k', use_clustes_as_keys = False, linewidth=0,
noise_data_color='k'):
import seaborn as sns
if (ax == None):
ax = plt
#unique_labels = set(labels)
unique_labels = np.unique(labels)
label_map = sorted(unique_labels)
if (colors == None):
colors = sns.color_palette()
if len(colors) < len(unique_labels):
colors = plt.cm.Spectral(np.linspace(1, 0, len(unique_labels)))
has_noise = False
if not use_clustes_as_keys:
if (label_map[0] == -1):
if (isinstance(colors, list)):
colors = [noise_data_color] + colors
else:
colors = [noise_data_color] + colors.tolist()
#for k, col in zip(label_map, colors):
for k, i in zip(label_map, xrange(len(label_map))):
if k == -1:
# Black used for noise.
col = noise_data_color
has_noise = True
else:
if use_clustes_as_keys:
col = colors[int(k)]
else:
col = colors[i]
class_member_mask = (labels == k)
xy = X[class_member_mask]
ax.scatter(xy[:, 0], xy[:, 1], s=markersize, facecolor=col,
edgecolor=markeredgecolor, linewidth=linewidth)
#ax.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
# markeredgecolor=markeredgecolor, markersize=markersize, lw=lw)
if (show_title):
labels_count = len(unique_labels)
if (has_noise):
labels_count = labels_count - 1
title_prefix = ""
if (name != None):
title_prefix = "%s - "%name
if hasattr(ax, 'set_title'):
ax.set_title((title_prefix + 'Estimated number of clusters: %d') % len(unique_labels))
else:
ax.title((title_prefix + 'Estimated number of clusters: %d') % len(unique_labels))
#if (show_plt):
# ax.show()
return ax
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def draw_clusters3d(X, labels, colors=None, show_plt=True, show_title=False, name=None, ax=None, markersize=15, markeredgecolor='k', linewidth=0):
import seaborn as sns
#if (ax == None):
# ax = plt
#unique_labels = set(labels)
fig = plt.figure(figsize=(float(1600) / float(72), float(1600) / float(72)))
ax = fig.add_subplot(111, projection='3d')
unique_labels = np.unique(labels)
label_map = sorted(unique_labels)
if (colors == None):
colors = sns.color_palette()
#colors = plt.cm.Spectral(np.linspace(1, 0, len(unique_labels)))
has_noise = False
if (label_map[0] == -1):
colors = ['k'] + colors
for k, col in zip(label_map, colors):
if k == -1:
# Black used for noise.
#col = 'k'
has_noise = True
class_member_mask = (labels == k)
xy = X[class_member_mask]
print col
ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], s=markersize, c=col)
# edgecolor=markeredgecolor, linewidth=linewidth)
#ax.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
# markeredgecolor=markeredgecolor, markersize=markersize, lw=lw)
if (show_title):
labels_count = len(unique_labels)
if (has_noise):
labels_count = labels_count - 1
title_prefix = ""
if (name != None):
title_prefix = "%s - "%name
if hasattr(ax, 'set_title'):
ax.set_title((title_prefix + 'Estimated number of clusters: %d') % len(unique_labels))
else:
ax.title((title_prefix + 'Estimated number of clusters: %d') % len(unique_labels))
#if (show_plt):
# ax.show()
#ax.set_zlim([-0.01, 0])
return ax
def read_data(filePath, seperator=',', has_labels=True):
with open(filePath) as handle:
data = []
labels = None
if (has_labels):
labels = []
for line in handle:
line = line.rstrip()
if len(line) == 0:
continue
row = []
line_parts = line.split(seperator)
row = [float(i) for i in line_parts[:len(line_parts)-1]]
data.append(row)
if (has_labels):
label = int(line_parts[-1])
labels.append(label)
return np.ndarray(shape=(len(data), len(data[0])), buffer=np.matrix(data)), np.array(labels)
def show_clusters(path_or_data):
X, labels_true = load_from_file_or_data(path_or_data, hasLabels=True)
draw_clusters(X, labels_true)
def run_dbscan(data, eps, min_samples):
db = DBSCAN(eps=eps, min_samples=min_samples).fit(data)
return db.labels_
def run_hdbscan(data, min_cluster_size):
import hdbscan
hdb = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size).fit(data)
return hdb.labels_
def clusters_to_labels(clusters, labels, force_unique=False):
# for each cluster - find the matching label according to the majority of the data points
# get the number of clusters:
offset = 0
if (0 not in np.unique(labels)):
offset = 1
labels_count = max(np.unique(labels)) + (1 - offset)
clusters_hist_map = {}
clusters_labels_map = {}
for c,l in zip(clusters, labels):
if (c == -1):
continue
if (not clusters_hist_map.has_key(c)):
clusters_hist_map[c] = np.zeros(labels_count)
clusters_hist_map[c][l - offset] += 1
unset_clusters = []
for c in clusters_hist_map.keys():
l = np.argmax(clusters_hist_map[c])
if force_unique:
label_already_used = False
for k in clusters_labels_map:
if clusters_labels_map[k] == (l + offset):
unset_clusters.append(c)
label_already_used = True
if label_already_used:
continue
clusters_labels_map[c] = l + offset
if force_unique:
current_max_label = np.max(labels)
for c in unset_clusters:
label_to_use = -1
for l in labels:
is_label_used = False
for k in clusters_labels_map:
if clusters_labels_map[k] == l:
is_label_used = True
break
if not is_label_used:
label_to_use = l
break
if label_to_use == -1:
current_max_label += 1
label_to_use = current_max_label
clusters_labels_map[c] = label_to_use
new_clusters = np.zeros(len(clusters))
for i,c in zip(xrange(len(new_clusters)), clusters):
if c == -1:
new_clusters[i] = -1
else:
new_clusters[i] = clusters_labels_map[c]
return new_clusters
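# Illustrative example of the majority-vote relabelling above (not part of the
# original repository; values chosen only to show the mapping). Each cluster id
# is replaced by the ground-truth label that dominates it, noise (-1) is kept:
#
#   clusters = np.array([0, 0, 0, 1, 1, -1])
#   labels   = np.array([2, 2, 1, 1, 1,  2])
#   clusters_to_labels(clusters, labels)
#   # -> array([ 2.,  2.,  2.,  1.,  1., -1.])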
def evaluate_clustering(X, labels_true, labels):
n_clusters_ = len(np.unique(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Normalized Mutual Information: %0.3f"
% metrics.normalized_mutual_info_score(labels_true, labels))
try:
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
except ValueError:
print("Silhouette Coefficient: None")
def cluster_and_evaluate(path_or_data, method):
X, labels_true = load_from_file_or_data(path_or_data, hasLabels=True)
labels = method(X)
draw_clusters(X,labels)
return evaluate_clustering(X, labels_true, labels)
def run_mean_shift(data, bandwidth=None, qunatile=0.09, cluster_all=False):
if (bandwidth == None):
bandwidth = estimate_bandwidth(data, quantile=qunatile, n_samples=len(data))
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True, cluster_all=cluster_all)
return ms.fit(data).labels_
def run_spectral_clustering(data, k):
spectral = SpectralClustering(n_clusters=k,
eigen_solver='arpack',
affinity="nearest_neighbors", n_init=1000)
return spectral.fit(data).labels_
def run_affinity_propogation(data, damping=0.5):
af = AffinityPropagation(damping=damping).fit(data)
return af.labels_
def run_ward_clustering(data, k):
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(data, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
return cluster.AgglomerativeClustering(n_clusters=k, linkage='ward',
connectivity=connectivity).fit(connectivity.toarray()).labels_
def sample_from_radius(data, number_of_centers, max_length, radius):
order = np.random.permutation(len(data))
centers = []
filter_mask = np.zeros(len(data))
for i in xrange(number_of_centers):
centers.append(data[order[i]])
filter_mask[order[i]] = 1.0
samples_count = number_of_centers
for i in order[number_of_centers:]:
if samples_count > max_length:
break
current = data[i]
for c in centers:
dist = np.linalg.norm(c - current)
if dist <= radius:
filter_mask[i] = 1.0
samples_count += 1
break
return filter_mask.astype(bool)
def get_count_by_labels(labels):
count = {}
for l in labels:
if (not count.has_key(l)):
count[l] = 1
else:
count[l] += 1
return count
def filter_small_clusters(labels, filter_mask, threshold):
filtered_labels = labels[filter_mask]
count_by_labels = get_count_by_labels(filtered_labels)
#filter_mask = np.zeros(len(labels))
for i in xrange(len(filter_mask)):
if (not filter_mask[i]):
continue
filter_mask[i] = 1.0 if count_by_labels[labels[i]] > threshold else 0.0
# filtered_data = data[filter_mask.astype(bool)]
# filtered_labels = labels[filter_mask.astype(bool)]
# filter out clusters with less than 50 items
# return filtered_data, filtered_labels
return filter_mask
def sample_with_radius_and_filter_small_clusters(data, labels, number_of_centers, max_size, radius, cluster_min_points):
filter_mask = sample_from_radius(data, number_of_centers, max_size, radius)
return filter_small_clusters(labels, filter_mask, cluster_min_points)
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import pdist
def draw_knn_dist_hist(data, k=20):
nbrs = NearestNeighbors(n_neighbors=k).fit(data, data)
all_dists = []
distances, indices = nbrs.kneighbors()
for dists in distances:
for d in dists:
all_dists.append(d)
print "knn dists stats:"
print_dists_statistics(all_dists)
plt.figure()
plt.title("knn distance hist")
plt.hist(all_dists, bins=50);
def draw_dist_hist(data):
dists = pdist(data)
print "all dists stats:"
print_dists_statistics(dists)
plt.figure()
plt.title("all distances hist")
plt.hist(dists, bins=50)
def print_dists_statistics(dists):
print "mean: %.3f"%(np.mean(dists))
print "median: %.3f"%(np.median(dists))
print "variance: %.3f"%(np.var(dists))
print "max: %.3f"%(np.max(dists))
print "min: %.3f"%(np.min(dists))
print "std+mean: %.3f"%(np.std(dists) + np.mean(dists))
def show_dists_stats(data,k=20):
draw_dist_hist(data)
draw_knn_dist_hist(data, k)
class DebugPlotSession:
def __init__(self, output_dir, marker_size=120, line_width=1.5):
self.current_index = 0
self.output_dir = output_dir
self.axes_ylim = None
self.axes_xlim = None
self.line_width = line_width
self.marker_size = marker_size
def get_or_set_axes_lim(self, plt, data):
axes = plt.gca()
# find min and max x, min, max y:
if (self.axes_ylim == None or self.axes_xlim == None):
min_x = np.min(data[:, 0])
max_x = np.max(data[:, 0])
min_y = np.min(data[:, 1])
max_y = np.max(data[:, 1])
self.axes_ylim = (min_y - 0.5, max_y + 0.5)
self.axes_xlim = (min_x - 0.5, max_x + 0.5)
axes.set_ylim(self.axes_ylim)
axes.set_xlim(self.axes_xlim)
def add_circles(self, ax, centers, radis):
circles = []
for c, r in zip(centers, radis):
circles.append(patches.Circle(tuple(c), r))
patch_collection = PatchCollection(circles)
patch_collection.set_facecolor("none")
patch_collection.set_edgecolor("blue")
patch_collection.set_linewidth(1.5)
patch_collection.set_linestyle("dashed")
ax.add_collection(patch_collection)
print "added %d circles"%len(circles)
def plot_and_save(self, data, filter=[],
colors=[(192.0 / 255.0, 0, 0), '#6b8ba4', (1.0, 1.0, 1.0)],
links = None, circles_and_radis = None):
if (self.output_dir == None):
return
self.current_index = self.current_index + 1
if (len(filter) == 0):
colors = ['blue']
filter = np.array([True] * len(data))
#else:
# for first iteration..
# colors = [(192.0/255.0, 0, 0),(146.0/255, 208.0/255, 80.0/255)]
# for second iteration..
#colors = [(255.0/255.0, 192.0/255.0, 0),(146.0/255, 208.0/255, 80.0/255)]
# for 3rd iteration
#colors = [(255.0/255.0, 255.0/255.0, 0),(146.0/255, 208.0/255, 80.0/255)]
#colors = ['red','blue']
plt.figure(figsize=(800.0/72.0, 800.0/72.0))
fig = draw_clusters(data, filter, colors=colors, show_plt=False,
show_title=False, markersize=self.marker_size,
markeredgecolor=(56.0/255.0,93.0/255.0,138.0/255.0), linewidth=self.line_width)
self.get_or_set_axes_lim(fig, data)
ax = fig.axes()
if (links != None):
for p in links:
#point1 = data[p[0],:].A1
#point2 = data[p[1],:].A1
point1 = data[p[0],:]
point2 = data[p[1],:]
#ax.arrow(point1[0], point1[1], point2[0] - point1[0], point2[1] - point1[1], head_width=0.1, head_length=0.2, fc='k', ec='k', color='green')
fig.plot([point1[0], point2[0]], [point1[1], point2[1]], color = 'green')
if (circles_and_radis != None):
self.add_circles(plt.gca(), circles_and_radis[0], circles_and_radis[1])
#plt.axes().get_xaxis().set_visible(False)
#plt.axes().get_yaxis().set_visible(False)
#plt.axes().patch.set_visible(False)
plt.axis('off')
if (self.output_dir != None):
file_path = join(self.output_dir,"%s.png"%self.current_index)
fig.savefig(file_path, bbox_inches='tight')
plt.close()
def plot_clusters_and_save(self, data, clusters, name=None, show_plt=False, noise_data_color='k'):
if ((self.output_dir == None) and (show_plt == False)):
return
import seaborn as sns
self.current_index = self.current_index + 1
#fig = draw_clusters(data, clusters, show_plt=show_plt, show_title=False, name=name)
plt.figure(figsize=(800.0 / 72.0, 800.0 / 72.0))
colors = [(1.0, 192.0/255.0, 0), (0, 176.0/255.0, 240.0/255.0)] + sns.color_palette()
fig = draw_clusters(data, clusters, show_plt=show_plt, show_title=False, markersize=self.marker_size,
markeredgecolor=(56.0/255.0,93.0/255.0,138.0/255.0), linewidth=self.line_width,
colors = colors, noise_data_color=noise_data_color)
plt.axis('off')
self.get_or_set_axes_lim(fig, data)
if (self.output_dir != None):
if (name == None):
file_path = join(self.output_dir,"%s.png"%self.current_index)
else:
file_path = join(self.output_dir,"%s.png"%name)
fig.savefig(file_path, bbox_inches='tight')
plt.close()
CLUSTERS_EVALUATION_COLUMNS= [
"Method",
"Params",
"Clusters #",
"NMI",
"AMI",
"ARI",
"RI",
# "Homogeneity",
# "Completeness",
# "V-measure",
# "Silhouette Coefficient"
]
class EvaluationFields(Enum):
method = 1
params = 2
clusters_num = 3
normazlied_mutual_information = 4
adjusted_mutual_information = 5
adjusted_rand_index = 6
rand_index = 7
# homogeneity = 8
# completeness = 9
# v_measure = 10
# silhouette_coefficient = 11
CLUSTERS_EVALUATION_CRITERIA= [
"Method",
"Params",
"Clusters #",
"NMI",
"AMI",
"ARI",
"RI",
# "Homogeneity",
# "Completeness",
# "V-measure",
# "Silhouette Coefficient"
]
def silhouette_safe(X, labels):
try:
return metrics.silhouette_score(X, labels)
except ValueError:
return -1
def rand_index(labels, cluster_assignments):
correct = 0
total = 0
sample_ids = range(len(labels))
for index_combo in itertools.combinations(sample_ids, 2):
index1 = index_combo[0]
index2 = index_combo[1]
same_class = (labels[index1] == labels[index2])
same_cluster = (cluster_assignments[index1]
== cluster_assignments[index2])
if same_class and same_cluster:
correct += 1
elif not same_class and not same_cluster:
correct += 1
total += 1
return float(correct) / total
evaulations_dict = {
EvaluationFields.method : lambda X, labels_true, labels, name, params : name,
EvaluationFields.params : lambda X, labels_true, labels, name, params : params,
EvaluationFields.clusters_num : lambda X, labels_true, labels, name, params : len(np.unique(labels)) - (1 if -1 in labels else 0),
EvaluationFields.normazlied_mutual_information : lambda X, labels_true, labels, name, params : metrics.normalized_mutual_info_score(labels_true, labels),
EvaluationFields.adjusted_mutual_information : lambda X, labels_true, labels, name, params : metrics.adjusted_mutual_info_score(labels_true, labels),
EvaluationFields.adjusted_rand_index : lambda X, labels_true, labels, name, params : metrics.adjusted_rand_score(labels_true, labels),
EvaluationFields.rand_index: lambda X, labels_true, labels, name, params: rand_index(labels_true, labels),
# EvaluationFields.homogeneity : lambda X, labels_true, labels, name, params : metrics.homogeneity_score(labels_true, labels),
# EvaluationFields.completeness : lambda X, labels_true, labels, name, params : metrics.completeness_score(labels_true, labels),
# EvaluationFields.v_measure : lambda X, labels_true, labels, name, params : metrics.v_measure_score(labels_true, labels),
# EvaluationFields.silhouette_coefficient : lambda X, labels_true, labels, name, params : silhouette_safe(X, labels),
}
def format_param(param):
if isinstance(param, float):
return "%0.2f"%param
else:
return param
class MethodsEvaluation:
def __init__(self, output_dir=None):
# key are method names, values are list of scores
self.scores_table = OrderedDict()
self.dbg_plot_session = DebugPlotSession(output_dir)
self.sorted_by = None
def evaulate_method(self, X, labels_true, labels, name, params, show_plt=False):
if (not self.scores_table.has_key(name)):
self.scores_table[name] = []
scores = [evaulations_dict[m](X, labels_true, labels, name, params) for m in EvaluationFields]
self.scores_table[name].append(scores)
self.dbg_plot_session.plot_clusters_and_save(X, labels, name=name, show_plt=show_plt)
return scores
def cluster_params_range_evaluation(self, data, true_labels, base_params, params_range, method, method_name):
for p in itertools.product(*params_range.values()):
func_params = deepcopy(base_params)
for i in xrange(len(params_range)):
func_params[params_range.keys()[i]] = p[i]
clusters = method(**func_params)
params_str = "_".join(["%s=%s"%(k,format_param(j)) for k,j in zip(params_range,p)])
self.evaulate_method(data, true_labels, clusters, method_name, params_str ,show_plt = False)
# for each method, leave only the top n scores
# this will also sort the list
def filter_top_n_by_field_for_method(self, evaluation_field, n):
self.sort_scores_by_field(evaluation_field)
for k in self.scores_table:
self.scores_table[k] = self.scores_table[k][:n]
def sort_scores_by_field(self, evaluation_field):
self.sorted_by = evaluation_field
for k in self.scores_table:
self.scores_table[k].sort(key=lambda x: x[evaluation_field.value-1], reverse=True)
def print_evaluations(self, draw_in_notebook=False):
print "\t".join(CLUSTERS_EVALUATION_COLUMNS)
for k in self.scores_table:
for score in self.scores_table[k]:
formatted_string_list = score[:EvaluationFields.clusters_num.value-1] + \
["%d"%score[EvaluationFields.clusters_num.value-1]] + \
["%0.3f"%s for s in score[EvaluationFields.clusters_num.value:]]
print "\t".join(formatted_string_list)
def get_evaluation_table(self):
formatted_list = []
keys = self.scores_table.keys()
#if self.sorted_by != None:
# keys.sort(key=lambda x: self.scores_table[x][0][self.sorted_by.value - 1], reverse=True)
for k in keys:
for score in self.scores_table[k]:
formatted_list.append(score[:EvaluationFields.clusters_num.value-1] +\
["%d"%score[EvaluationFields.clusters_num.value-1]] +\
["%0.3f"%s for s in score[EvaluationFields.clusters_num.value:]])
return CLUSTERS_EVALUATION_COLUMNS[:], formatted_list
| mit |
ewulczyn/ewulczyn.github.io | ipython/How_Naive_AB_Testing_Goes_Wrong/abstract_abtest.py | 1 | 8520 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from abtest_util import SimStream
from abc import ABCMeta, abstractmethod
class ABTest(object):
"""
This is the base class for dynamically
terminating AB tests. The idea is that you define
a stopping criterion and evaluate it every n records
until you get the stop signal.
"""
__metaclass__ = ABCMeta
def __init__(self, a_stream, b_stream, test_interval, max_run):
self.a_stream = a_stream # a banner data stream object for banner A
self.b_stream = b_stream # a banner data stream object for banner B
self.max_run = max_run # the maximum number of samples per banner
self.test_interval = test_interval # evaluate stopping criterion every test_interval records
self.a_estimator = None # an object that collects stats on banner A
self.b_estimator = None # an object that collects stats on banner B
self.has_run = False # flag to see if the test has already been run once
def run(self):
"""
This function runs the banners for test_interval records
Until the evaluate_stopping_criterium function returns a winner
or the maximum sample size is reached
"""
if self.has_run:
print ("This test already ran")
return
while True:
a_records = self.a_stream.get_next_records(self.test_interval)
b_records = self.b_stream.get_next_records(self.test_interval)
self.a_estimator.update(a_records)
self.b_estimator.update(b_records)
result = self.evaluate_stopping_criterium()
if result != 'continue':
self.has_run = True
return result
@abstractmethod
def evaluate_stopping_criterium(self):
"""
Each child class needs to define a criterion for stopping the test
"""
pass
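# A minimal sketch (not part of the original module) of what a concrete subclass
# could look like, kept as a comment for illustration. `SimpleEstimator` is a
# hypothetical stand-in for whatever statistics object the real tests use; the
# base class and `expected_results` below only assume it exposes an `update()`
# method and an `N` attribute, and this sketch assumes each record is a 0/1
# outcome.
#
#   class SimpleEstimator(object):
#       def __init__(self):
#           self.N = 0
#           self.conversions = 0
#       def update(self, records):
#           self.N += len(records)
#           self.conversions += sum(records)
#
#   class FixedSampleABTest(ABTest):
#       def __init__(self, a_stream, b_stream, test_interval, max_run):
#           super(FixedSampleABTest, self).__init__(a_stream, b_stream,
#                                                   test_interval, max_run)
#           self.a_estimator = SimpleEstimator()
#           self.b_estimator = SimpleEstimator()
#       def evaluate_stopping_criterium(self):
#           if self.a_estimator.N < self.max_run:
#               return 'continue'
#           if self.a_estimator.conversions > self.b_estimator.conversions:
#               return 'A'
#           return 'unknown'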
def expected_results(TestClass, params, iters):
"""
Evaluates a test with the same parameters multiple times
to get the expected results.
Args:
TestClass: AB Test Class
params: parameters for instantiating AB Test class
iters: number of times to run the Test object with the set of params
Returns:
Prob(A wins), P(unknown winner), list of run times
"""
num_choose_A = 0.0
unknown_count = 0.0
run_times = []
for i in range(iters):
t = TestClass(*params)
result = t.run()
if result == 'A':
num_choose_A += 1
elif result == 'unknown':
unknown_count += 1
run_times.append(max(t.a_estimator.N, t.b_estimator.N))
return num_choose_A/iters, unknown_count/iters, np.array(run_times)
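# Illustrative usage only (assumes the hypothetical FixedSampleABTest sketched
# above, and that SimStream simply wraps a donation-rate object the way it is
# used further down in this module):
#
#   params = [SimStream(p_A), SimStream(p_B), 1000, 50000]
#   p_A_wins, p_unknown, run_times = expected_results(FixedSampleABTest, params, iters=200)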
def expected_results_by_lift(TestClass, params, iters, p_hat, lifts, fig_name=None):
"""
This function generates plots that show the expected results
of the AB test as you change the lift that banner A has over
banner B.
"""
# see how you would do in practice
run_times_list = []
p_A_betters = {"lower": [], "upper":[], "mean": []}
p_unknowns = {"lower": [], "upper":[], "mean": []}
(lower, mean, upper) = p_hat.p_donate_ci(10)
for lift in lifts:
print (lift)
#lower
p_B = p_hat.change_p_donate(lower)
params[0] = SimStream(p_B.lift(lift)) #a_stream
params[1] = SimStream(p_B) #b_stream
p_better, p_unknown, time = expected_results(TestClass, params, iters)
p_A_betters['lower'].append(p_better)
p_unknowns['lower'].append(p_unknown)
# mean
p_B = p_hat
params[0] = SimStream(p_B.lift(lift)) #a_stream
params[1] = SimStream(p_B) #b_stream
p_better, p_unknown, time = expected_results(TestClass, params, iters)
run_times_list.append(time)
p_A_betters['mean'].append(p_better)
p_unknowns['mean'].append(p_unknown)
#upper
p_B = p_hat.change_p_donate(upper)
params[0] = SimStream(p_B.lift(lift)) #a_stream
params[1] = SimStream(p_B) #b_stream
p_better, p_unknown, time = expected_results(TestClass, params, iters)
p_A_betters['upper'].append(p_better)
p_unknowns['upper'].append(p_unknown)
lifts = np.array(lifts)*100
avg_run_times = np.array([np.mean(run_times) for run_times in run_times_list])
lower = [np.percentile(run_times, 5) for run_times in run_times_list]
upper = [np.percentile(run_times, 95) for run_times in run_times_list]
fig = plt.figure(figsize=(13, 8))
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
data = zip(lifts, p_A_betters['mean'], p_unknowns['mean'], avg_run_times)
columns = ['% lift A over B', 'P(Choosing A) Median', 'P(Unknown) Median', 'Avg Time']
df = pd.DataFrame.from_records(data, columns=columns)
ax1.set_ylim([-0.1, 1.1])
ax1.plot(lifts, p_A_betters['mean'], label='P(A wins) median')
ax1.plot(lifts, p_A_betters['lower'], label='P(A wins) lower', alpha=0.31)
ax1.plot(lifts, p_A_betters['upper'], label='P(A wins) upper', alpha=0.31)
ax1.plot(lifts, p_unknowns['mean'], label='P(unknown)')
ax1.set_xlabel('percent lift')
ax1.set_ylabel('probability')
ax1.legend(loc=7)
ax2.set_xlim([lifts[0], lifts[-1]])
ax2.plot(lifts, avg_run_times, label='avg time')
ax2.fill_between(lifts, lower, upper, alpha=0.31, edgecolor='#3F7F4C', facecolor='0.75', linewidth=0)
ax2.set_xlabel('percent lift')
ax2.set_ylabel('sample size')
ax2.legend(loc=1)
plt.show()
if fig_name:
fig.savefig(fig_name)
return df
def expected_results_by_interval(TestClass, params, iters, p_hat, lifts, n1, n2, n3, fig_name=None):
"""
This function generates plots that show the expected results
of the AB test as you change the lift that banner A has over
banner B.
"""
# see how you would do in practice
run_times_list = {"lower": [], "upper":[], "mean": []}
p_A_betters = {"lower": [], "upper":[], "mean": []}
(lower, mean, upper) = p_hat.p_donate_ci(10)
for lift in lifts:
print (lift)
p_B = p_hat
# mean
new_params = list(params)
new_params[0] = SimStream(p_B.lift(lift)) #a_stream
new_params[1] = SimStream(p_B) #b_stream
new_params[2] = n1
p_better, p_unknown, time = expected_results(TestClass, new_params, iters)
run_times_list['mean'].append(time)
p_A_betters['mean'].append(p_better)
#lower
new_params = list(params)
new_params[0] = SimStream(p_B.lift(lift)) #a_stream
new_params[1] = SimStream(p_B) #b_stream
new_params[2] = n2
p_better, p_unknown, time = expected_results(TestClass, new_params, iters)
p_A_betters['lower'].append(p_better)
run_times_list['lower'].append(time)
#upper
new_params = list(params)
new_params[0] = SimStream(p_B.lift(lift)) #a_stream
new_params[1] = SimStream(p_B) #b_stream
new_params[2] = n3
p_better, p_unknown, time = expected_results(TestClass, new_params, iters)
p_A_betters['upper'].append(p_better)
run_times_list['upper'].append(time)
lifts = np.array(lifts)*100
avg_run_times_mean = np.array([np.mean(run_times) for run_times in run_times_list['mean']])
avg_run_times_upper = np.array([np.mean(run_times) for run_times in run_times_list['upper']])
avg_run_times_lower = np.array([np.mean(run_times) for run_times in run_times_list['lower']])
fig = plt.figure(figsize=(13, 8))
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.set_ylim([-0.1, 1.1])
ax1.plot(lifts, p_A_betters['lower'], label='P(A wins) n = %d' % n1)
ax1.plot(lifts, p_A_betters['mean'], label='P(A wins) n = %d' % n2)
ax1.plot(lifts, p_A_betters['upper'], label='P(A wins) n = %d' % n3)
ax1.set_xlabel('percent lift')
ax1.set_ylabel('probability of choosing A')
ax1.legend(loc=4)
ax2.set_xlim([lifts[0], lifts[-1]])
ax2.plot(lifts, avg_run_times_lower, label='n = %d'% n1)
ax2.plot(lifts, avg_run_times_mean, label='n = %d'% n2)
ax2.plot(lifts, avg_run_times_upper, label='n = %d' % n3)
ax2.set_xlabel('percent lift')
ax2.set_ylabel('sample size')
ax2.legend(loc=1)
plt.show()
if fig_name:
fig.savefig(fig_name)
| mit |
eddowh/nyc-green-taxi-map-visualization | src/utils.py | 1 | 4086 | # -*- coding: utf-8 -*-
import pandas as pd
from functools import reduce
def reduce_taxi_df_memory_usage(df):
"""
Reduce memory footprint of the taxi data.
Parameters
----------
df : pandas.DataFrame
The dataframe that will have its memory footprint reduced.
Returns
-------
df : pandas.DataFrame
The original array with the reduced memory footprint.
"""
# float variables
float_dtype_colnames = [
'extra',
'fare_amount',
'improvement_surcharge',
'mta_tax',
'tip_amount',
'tolls_amount',
'total_amount',
'trip_distance',
]
for colname in float_dtype_colnames:
if df.loc[:, colname].dtype != 'float32':
df.loc[:, colname] = pd.to_numeric(df.loc[:, colname],
downcast='float')
# drop off / pick-up can be specified as `dolocationid` and `pulocationid`
# instead of geographical coordinates
do_pu_longlat_colnames = [
'dropoff_latitude',
'dropoff_longitude',
'pickup_latitude',
'pickup_longitude',
]
if reduce(lambda a, b: a and b,
map(lambda c: c in df.columns, do_pu_longlat_colnames)):
for colname in do_pu_longlat_colnames:
if df.loc[:, colname].dtype != 'float32':
df.loc[:, colname] = pd.to_numeric(df.loc[:, colname],
downcast='float')
do_pu_id_colnames = [
'dolocationid',
'pulocationid',
]
if reduce(lambda x, y: x in df.columns and y in df.columns,
do_pu_id_colnames):
for colname in do_pu_id_colnames:
try:
is_category = df.loc[:, colname].dtype == 'category'
except TypeError:
is_category = False
finally:
if not is_category:
df.loc[:, colname] = df.loc[:, colname] \
.fillna(0) \
.astype('category', ordered=True)
# categorical variables
categorical_dtype_colnames = [
'passenger_count',
'payment_type',
'ratecodeid',
'vendorid',
'trip_type',
]
for colname in categorical_dtype_colnames:
try:
is_category = df.loc[:, colname].dtype == 'category'
except TypeError:
is_category = False
finally:
if not is_category:
df.loc[:, colname] = df.loc[:, colname] \
.fillna(0) \
.astype('category', ordered=True)
# boolean variables
if df.loc[:, 'store_and_fwd_flag'].dtype != 'bool':
df.loc[:, 'store_and_fwd_flag'] = \
df.loc[:, 'store_and_fwd_flag'] == 'Y'
# datetime variables
datetime_dtype_colnames = [
'lpep_dropoff_datetime',
'lpep_pickup_datetime',
]
for colname in datetime_dtype_colnames:
if df.loc[:, colname].dtype != 'datetime64[ns]':
df.loc[:, colname] = pd.to_datetime(df.loc[:, colname],
format='%Y-%m-%dT%H:%M:%S.%f')
return df
def mem_usage(pandas_obj):
"""
Displays memory usage of a dataframe or series.
NOTE: not authored by me (Eddo W. Hintoso).
Courtesy of Josh Devlin from DataQuest.
Source: <https://www.dataquest.io/blog/pandas-big-data/>
Parameters
----------
pandas_obj : pandas.DataFrame, pandas.Series
The pandas object (either a DataFrame or Series) that we will
calculate the memory usage of.
Returns
-------
usage_mb : str
The string representation of the memory usage in megabytes (MB).
"""
if isinstance(pandas_obj, pd.DataFrame):
usage_b = pandas_obj.memory_usage(deep=True).sum()
else: # we assume if not a df it's a series
usage_b = pandas_obj.memory_usage(deep=True)
usage_mb = usage_b / 1024 ** 2 # convert bytes to megabytes
return "{:03.2f} MB".format(usage_mb)
| mit |
quheng/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
ajulik1997/SWINE | SWINE.py | 1 | 24500 | ############################################################
#
# Written by Alexander Liptak (Summer Student 2017)
# Date: August 2017
# E-Mail: [email protected]
# Phone: +44 7901 595107
#
# Tested with McStas 2.4
#
############################################################
import os
import sys
from multiprocessing import cpu_count
import numpy as np
from sympy import *
import matplotlib.pyplot as plt
from subprocess import Popen, CREATE_NEW_CONSOLE, check_call
from datetime import datetime
from time import sleep
from glob import glob
from colorama import init, Fore
from shutil import rmtree
from pickle import dump, load
############################################################
# Introduction
############################################################
print("==================================================")
print(" SWINE ")
print("==================================================")
print(" Slit Width Influence on Neutron flux Estimates ")
print("==================================================")
############################################################
# Load ANSI support for coloured text
#
# Colour meaning:
# RED - Error
# YELLOW - Warning
# GREEN - Success
# MAGENTA - Input
############################################################
init(autoreset=True)
############################################################
# Make sure I am running in Windows
############################################################
print("Checking OS...")
if os.name != 'nt':
print(Fore.RED + "This script only works on Windows!")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "You are running a compaible Windows-based OS")
############################################################
# Make sure I am running in Python 3 or higher
# (no longer necessary as running embedded python)
############################################################
print("Checking Python version...")
if sys.version_info[0] < 3:
print(Fore.RED + "This script only works on Python 3!")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "Compatible embedded Python "+sys.version.split(" ")[0])
############################################################
# Checking the amount of cores system has for running
# multiple simulations without slowing each sim down
############################################################
print("Checking system...")
cores = cpu_count()
print(Fore.GREEN + "Found [" + str(cores) + "] cores!")
############################################################
# Check if mcstas, mcrun and mclib are in their default dir
############################################################
print("Checking McStas...")
try:
mcrun = glob('C:\\mcstas*\\bin\\mcrun.bat')[0]
mcstas = glob('C:\\mcstas*\\bin\\mcstas.exe')[0]
mclib = glob(glob('C:\\mcstas*\\lib')[0]+'\\*')
gcc = glob('C:\\mcstas-*\\miniconda*\\Library\\mingw-w64\\bin\\')[0]
pydir = glob('C:\\mcstas-*\\miniconda*\\')[0]
except:
print("McStas is not installed in the default directory!")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "Using version: " + mcrun.split('\\')[1])
############################################################
# Set temporary environment variables for McStas and GCC
############################################################
os.environ['PATH']=gcc+';'+pydir
############################################################
# Ask user whether to retrieve interactive plot or run sim
# Included end='' in print statement as a hack for colorama
# incompatibility with non-ANSI input()
# GitHub colorama issue #103
############################################################
print("==================================================")
while True:
print(Fore.MAGENTA + "Would like to run a simulation (S), simulate with debug mode (D), or load a previous plot (L)? [S/D/L] ", end='')
load_or_sim = str(input()).upper()
if load_or_sim == 'L' or load_or_sim == 'S' or load_or_sim == 'D':
if load_or_sim == 'L':
unpickle = True
debug = False
if load_or_sim == 'S':
unpickle = False
debug = False
if load_or_sim == 'D':
unpickle = False
debug = True
break
else:
print(Fore.YELLOW + "That is not a recongnised option!")
############################################################
# If user decided to load previous plot, begin unpickling
# For some reason, all unpickled figures default to tkagg
# so used appropriate maximise commands
# Shows plot and exits
############################################################
if unpickle == True:
print(Fore.MAGENTA + "Drag and drop your .swine file here: ", end='')
pickledplot = input()
print("Loading plot...")
fig = load(open(pickledplot, 'rb'))
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
print("Exitting...")
sys.exit()
############################################################
# Opens file for debugging, all external output will be
# piped here
############################################################
if debug == True:
debugfile = open('debug.log', 'a')
debugfile.write("==================================================\n")
############################################################
# Ask user whether to use the default OffSpec-based .instr
# file for this simulation or use their own
############################################################
print("==================================================")
while True:
print(Fore.MAGENTA + "Would like to run from deafult (OffSpec-based) instrument file? [Y/N] ", end='')
default_instr = str(input()).upper()
if default_instr == 'Y' or default_instr == 'N':
break
else:
print(Fore.YELLOW + "That is not a recongnised option!")
############################################################
# If user selected using the default instrument file, slit
# and sample parameter names are set automatically, and
# the user is given choice whether to use the default
# positions or set their own. Then the values for slit and
# sample positions are entered, or defaults are used.
# If the user wants to use their own instrument file, the
# parameters that control McStas slit and sample widths
# and positions need to be entered manually, as do their
# values.
############################################################
cwd = os.getcwd()
if default_instr == "Y":
instr = cwd+'\\resources\\default.instr'
s1w_param = 'slit1_width'
s2w_param = 'slit2_width'
s1p_param = 'slit1_pos'
s2p_param = 'slit2_pos'
sap_param = 'sample_pos'
out_param = 'sample_psd'
print("Enter slit and sample positons after bender (leave empty for default):")
print(Fore.MAGENTA + "McStas position of slit 1 [8.58](m): ", end='')
slit1Pos = float(input() or (8.58))
print(Fore.MAGENTA + "McStas position of slit 2 [13.63](m): ", end='')
slit2Pos = float(input() or (13.63))
print(Fore.MAGENTA + "McStas position of sample [14.03](m): ", end='')
sampPos = float(input() or (14.03))
if default_instr == "N":
print("Make sure your .instr file is formatted as set out in the README!")
print(Fore.MAGENTA + "Drag and drop your .instr file here: ", end='')
instr = input()
print(Fore.MAGENTA + "Enter McStas parameter that controls slit 1 width: ", end='')
s1w_param = str(input())
print(Fore.MAGENTA + "Enter McStas parameter that controls slit 2 width: ", end='')
s2w_param = str(input())
print(Fore.MAGENTA + "Enter McStas parameter that controls slit 1 position: ", end='')
s1p_param = str(input())
print(Fore.MAGENTA + "Enter McStas parameter that controls slit 2 position: ", end='')
s2p_param = str(input())
print(Fore.MAGENTA + "Enter McStas parameter that controls sample position: ", end='')
sap_param = str(input())
print(Fore.MAGENTA + "Enter McStas component name of your PSD_monitor: ", end='')
out_param = str(input())
while True:
try:
print("Enter slit and sample positons for your McStas instrument:")
print(Fore.MAGENTA + "McStas position of slit 1 (m): ", end='')
slit1Pos = float(input())
print(Fore.MAGENTA + "McStas position of slit 2 (m): ", end='')
slit2Pos = float(input())
print(Fore.MAGENTA + "McStas position of sample (m): ", end='')
sampPos = float(input())
break
except:
print(Fore.YELLOW + "Blank and non-numeric input is not allowed, try again!")
############################################################
# Only if using custom instrument file, checks whether
# specified parameters that were entered actually exist
# in the file
############################################################
if default_instr == "N":
if (s1w_param not in open(instr).read() or s1w_param == ''
or s2w_param not in open(instr).read() or s2w_param == ''
or s1p_param not in open(instr).read() or s1p_param == ''
or s2p_param not in open(instr).read() or s2p_param == ''
or sap_param not in open(instr).read() or sap_param == ''
or out_param not in open(instr).read() or out_param == ''):
print(Fore.RED + "The selected instrument file does not use these parameters!")
print(Fore.RED + "Edit your instrument file or re-run this script and try again.")
print(Fore.RED + "Exitting...")
sys.exit()
############################################################
# Compile instrument into C using McStas
# Required to CD to the folder containing the instrument file
# to get around McStas GitHub Issue #532
############################################################
print("==================================================")
print("Compiling instrument file into C...")
INSTRtoC = mcstas, '-I', ' -I '.join(mclib), '-t', os.path.split(instr)[1]
try:
os.chdir(os.path.split(instr)[0])
if debug == False:
check_call(' '.join(INSTRtoC), creationflags=CREATE_NEW_CONSOLE)
if debug == True:
check_call(' '.join(INSTRtoC), stdout=debugfile, stderr=debugfile)
os.chdir(cwd)
except:
print(Fore.RED + "An unknown error has occured while compiling to C...")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "Compiled to C successfully!")
############################################################
# Compile C code into binary
############################################################
print("Compiling C file into binary...")
CtoEXE = 'gcc', '-o', os.path.splitext(instr)[0]+'.exe', os.path.splitext(instr)[0]+'.c', '-g', '-O2','-lm'
try:
if debug == False:
check_call(' '.join(CtoEXE), creationflags=CREATE_NEW_CONSOLE)
if debug == True:
check_call(' '.join(CtoEXE), stdout=debugfile, stderr=debugfile)
except:
print(Fore.RED + "An unknown error has occured while compiling to binary...")
print(Fore.RED + "Exitting...")
sys.exit()
print(Fore.GREEN + "Compiled to binary successfully!")
############################################################
# Data collection that supports default values
############################################################
print("==================================================")
print("Please input the required values or press the return key for defaults.")
print("Default values are in square brackets and required units are in parentheses.")
print(Fore.MAGENTA + "Angle of sample [1.2](degrees): ", end='')
angle = np.deg2rad(float(input() or (1.2)))
print(Fore.MAGENTA + "Maximum allowed penumbra [80](mm): ", end='')
maxPenumbra = float(input() or (80))
print(Fore.MAGENTA + "Number of steps per slit (higer-finer, lower-faster) [50]: ", end='')
steps1 = int(input() or (50))
print(Fore.MAGENTA + "Number of steps per resolution (higer-finer, lower-faster) [50]: ", end='')
steps2 = int(input() or (50))
print(Fore.MAGENTA + "No of neutrons per simulation [1000000]: ", end='')
neutrons = int(input() or (1000000))
print(Fore.MAGENTA + "Plot description (appended to graph title): ", end='')
description = str(input() or (''))
############################################################
# Define necessary values, variables and equations that
# will have to be solved later
# Make sure all distances are in mm
# penumbra is the sympy equation for calculating the
# penumbra of the footprint with respect to slit widths
# and their separation, as well as the angle of the
# sample
# dQQ is a sympy formula that calculates the resolution
# from slit widths, their positions, and the angle of
# the sample
############################################################
s1s2Sep = (slit2Pos-slit1Pos)*1000
s2SampSep = (sampPos-slit2Pos)*1000
s1 = symbols('s1')
s2 = symbols('s2')
penumbra = (2*((((s1s2Sep+s2SampSep)*(s1+s2))/(2*s1s2Sep))-(s1/2)))/(sin(angle))
dQQ = ((atan((s1+s2)/(s1s2Sep)))/(2*tan(angle)))*100
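# Worked example (approximate, for orientation only): with the default geometry
# above, s1s2Sep = (13.63 - 8.58) * 1000 = 5050 mm and s2SampSep = 400 mm, so for
# s1 = s2 = 1 mm at a 1.2 degree sample angle:
#   penumbra.subs([(s1, 1), (s2, 1)])  evaluates to roughly 55 mm of footprint
#   dQQ.subs([(s1, 1), (s2, 1)])       evaluates to roughly 0.95, i.e. ~0.95 % dQ/Q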
############################################################
# Set both slit minima to 0, solve penumbra equation for
# maximum allowed slit opening
############################################################
slit1min = 0.0
slit2min = 0.0
slit1max = float(next(iter(solveset(Eq(penumbra.subs(s2,0),maxPenumbra),s1))))
slit2max = float(next(iter(solveset(Eq(penumbra.subs(s1,0),maxPenumbra),s2))))
############################################################
# Create and fill array with all the slit width values
# that will be tested (Simulation 1 only)
############################################################
slit1vals = np.array([])
slit2vals = np.array([])
for i in range(steps1+1):
slit1vals = np.append(slit1vals, slit1min+(i*((slit1max - slit1min)/steps1)))
slit2vals = np.append(slit2vals, slit2min+(i*((slit2max - slit2min)/steps1)))
############################################################
# Create two arrays, correctly sized and filled with
# zeros
# Later, the values that satisfy the constraints will be
# tested and their results will be added to this array
# while those values that do not satisfy the constrains
# will remain as zero
############################################################
intensity = np.zeros((steps1+1,steps1+1))
quality = np.zeros((steps1+1,steps1+1))
############################################################
# Create output directory, if there is some error, closes
############################################################
swinedir = 'SWINE{:[%Y-%m-%d][%H-%M-%S]}'.format(datetime.now())
try:
os.mkdir(swinedir)
except:
print(Fore.RED + "You do not appear to have write permission in this folder!")
print(Fore.RED + "Exitting...")
sys.exit()
############################################################
# Everything ready to start, give user final instructions
############################################################
print("==================================================")
print("The script is now ready to run!")
print("Depending on your settings, this may take over a few hours to complete.")
print("It is recommended to not use the computer while this script is running.")
print(Fore.MAGENTA + "Press any key to continue...", end='')
input()
print("==================================================")
############################################################
# Simulation 1
# Create an empty list that will contain every call to be
# made to McStas
# Create an emty list that will contain debugging
# information
# Solve the penumbra and resolution equations for the
# current combination of slits, and if satisfies the
# constraints, call and debug info are appended to their
# respective lists
# Zero slit width simulations are also skipped due to
# an issue with the definition of a slit in McStas
# (GitHub Issue #522 in McCode)
############################################################
calls1 = []
debug1 = []
for index1, item1 in enumerate(slit1vals):
for index2, item2 in enumerate(slit2vals):
penumbraCurrent = penumbra.subs([(s1,item1),(s2,item2)])
qualityCurrent = dQQ.subs([(s1,item1),(s2,item2)])
quality[index1,index2] = qualityCurrent
if ((penumbraCurrent <= maxPenumbra) \
and (item1 != 0.0 and item2 != 0.0)):
calls1.append([mcrun, instr,
'-d', swinedir+'/A['+str(index1)+']['+str(index2)+']',
'-n', str(neutrons),
s1p_param+'='+str(slit1Pos), s2p_param+'='+str(slit2Pos),
sap_param+'='+str(sampPos),
s1w_param+'='+str(item1/1000), s2w_param+'='+str(item2/1000)])
debug1.append([item1, item2, penumbraCurrent, qualityCurrent])
############################################################
# Simulation 2
# Like previously, two lists are created that will contain
# the calls and debugging information
# The values for minimum and maximum resolution are obtained
# by taking the ceiling and floor functions of the minimum
# and maximum possible resolutions from the previous
# simulations, plus or minus one (respectively)
# For every resolution to be found, the range of s2 values
# that satisfy the maximum penumbra are found, as well as
# the corresponding s1 values. A check is made if either
# of these values are not negative, and a call list is
# generated, along with debugging information
# The final data matrix should be of the format:
# [resolution, [slit 2 widths], [intensities]]
# where the data for the intensity sublist will be
# collected after the simulations complete
############################################################
calls2 = []
debug2 = []
minQ = int(np.ceil(np.amin(quality)))+1
maxQ = int(np.floor(np.amax(quality)))-1
data2 = []
for index, item in enumerate(list(range(minQ, maxQ+1))):
data2.append([])
data2[index].append(item)
s2range = np.delete(np.linspace(0, float(next(iter(solveset(Eq(solveset(Eq(penumbra,maxPenumbra), symbol=s1),solveset(Eq(dQQ,item), symbol=s1)),symbol=s2)))), steps2), 0)
s1range = [float(next(iter(solveset(Eq(dQQ,item), symbol=s1).subs(s2, element)))) for element in s2range]
templist = []
for index2, item2 in enumerate(s2range):
if float(s2range[index2]) > 0 and float(s1range[index2]) > 0:
calls2.append([mcrun, instr,
'-d', swinedir+'/B['+str(item)+']['+str(item2)+']',
'-n', str(neutrons*10),
s1p_param+'='+str(slit1Pos), s2p_param+'='+str(slit2Pos),
sap_param+'='+str(sampPos),
s1w_param+'='+str(s1range[index2]/1000), s2w_param+'='+str(s2range[index2]/1000)])
debug2.append([item, s1range[index2], item2])
templist.append(s2range[index2])
data2[index].append(templist)
data2[index].append([])
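# For reference, data2 ends up shaped as described in the comment block above
# (numbers purely illustrative):
#   data2 = [[2, [0.35, 0.70, ...], []],   # dQ/Q = 2 %, valid s2 widths, intensities (filled after the sims)
#            [3, [0.52, 1.04, ...], []],
#            ...]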
############################################################
# Simulation 1
# Runs as many simulations at a time as there are cores
# Keeps count of how many calls have been made so that
# we run them all and none are missed
# Print debugging information
############################################################
calls1_done = 0
while calls1_done < len(calls1):
running_calls = []
for core in range(0, cores):
if calls1_done < len(calls1):
print('| Sim1',
'|',format(int((calls1_done+1)/len(calls1)*100), '03.0f')+'%',
'| Core:',str(core),
'| S1W:',format(debug1[calls1_done][0], '03.2f'),
'| S2W:',format(debug1[calls1_done][1], '03.2f'),
'| PU:',format(float(debug1[calls1_done][2]), '03.2f'),
'| Res:',format(float(debug1[calls1_done][3]), '03.2f'), '|')
if debug == False:
sim = Popen(calls1[calls1_done], creationflags=CREATE_NEW_CONSOLE)
if debug == True:
sim = Popen(calls1[calls1_done], stdout=debugfile, stderr=debugfile)
running_calls.append(sim)
calls1_done = calls1_done + 1
print("--------------------------------------------------")
for call in running_calls:
call.wait()
sleep(cores)
############################################################
# Same thing as above but for second set of simulations
############################################################
calls2_done = 0
while calls2_done < len(calls2):
running_calls = []
for core in range(0, cores):
if calls2_done < len(calls2):
print('| Sim2',
'|',format(int((calls2_done+1)/len(calls2)*100), '03.0f')+'%',
'| Core:',str(core),
'| Res:',str(int(debug2[calls2_done][0])),
'| S1W:',format(debug2[calls2_done][1], '03.2f'),
'| S2W:',format(debug2[calls2_done][2], '03.2f'), '|')
if debug == False:
sim = Popen(calls2[calls2_done], creationflags=CREATE_NEW_CONSOLE)
if debug == True:
sim = Popen(calls2[calls2_done], stdout=debugfile, stderr=debugfile)
running_calls.append(sim)
calls2_done = calls2_done + 1
print("--------------------------------------------------")
for call in running_calls:
call.wait()
sleep(cores)
############################################################
# Reads the specified McRun output file from every subfolder
# If the subfolder is labeled A (sim 1), then the intensity
# scraped from this file is used to update the intensity
# matrix
# If the subfolder is labeled B (sim 2), then the value is
# appended to the correct sublist in the data matrix
############################################################
print("Collecting data...")
os.chdir(swinedir)
sleep(1)
for folder in os.listdir():
dim1 = str(folder).split('][')[0][2:]
dim2 = str(folder).split('][')[1][:-1]
with open(str(folder)+'/'+str(out_param)+'.dat', 'r') as file:
for line in file:
if 'values:' in line:
if str(folder)[0] == 'A':
intensity[int(dim1), int(dim2)] = line.split(' ')[2]
if str(folder)[0] == 'B':
for item in data2:
if int(dim1) == item[0]:
item[2].append(line.split(' ')[2])
break
############################################################
# Deletes the swinedir folder to save space, all needed data
# has been collected already
############################################################
print("Cleaning up...")
os.chdir(cwd)
rmtree(swinedir)
os.remove(os.path.basename(instr))
############################################################
# Cretes a blank figure that will hold two subplots
# Subplot 1 is created, and on it is plotted the heatmap
# generated from the intensity matrix. A colourbar for
# this data is also generated. Resolution contour lines
# are then obtained from the resolution matrix and plotted
# on the same subplot. The title and axis labels are made
# and the tick values are regenerated.
# Subplot 2 is created, and the data matrix is looped over
# so that a line for every resolution is drawn.
# The legend, title and axis labels are also drawn.
############################################################
print("Plotting data...")
fig = plt.figure()
plt.subplot(121)
heatmap = plt.imshow(intensity, cmap='hot', interpolation='nearest')
contour = plt.contour(quality, antialiased=True)
plt.clabel(contour, inline=1, fontsize=10)
plt.colorbar(heatmap)
plt.title('Neutron intensity at varying slit widths | '+description)
plt.xlabel('Slit 2 width (mm)')
plt.ylabel('Slit 1 width (mm)')
plt.xticks(np.linspace(0, len(slit2vals)-1, num=6), np.linspace(round(slit2min, 2), round(slit2max, 2), num=6))
plt.yticks(np.linspace(0, len(slit1vals)-1, num=6), np.linspace(round(slit1min, 2), round(slit1max, 2), num=6))
plt.subplot(122)
for item in data2:
plt.plot(item[1], item[2], '-', label='dQ/Q = '+str(item[0]))
plt.legend()
plt.title('Intensity against slit 2 width at constant resolution | '+description)
plt.xlabel('Slit 2 width (mm)')
plt.ylabel('Intensity')
############################################################
# The window needs to be maximised as the default view
# makes reading the plots impossible.
############################################################
try:
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
except:
print(Fore.YELLOW + "Error maximising window, please maximise windows manually!")
############################################################
# Experimental pickle support means it is possible to store
# entire plot in a file and recover it later, interactive
# Also show figure and exit
############################################################
print("Saving figure...")
dump(fig, open(swinedir+'.swine', 'wb'))
print("Opening plot...")
plt.show()
print("Exitting...")
sys.exit() | mit |
dougthor42/PyBank | pybank/gui.py | 1 | 55789 | # -*- coding: utf-8 -*-
# pylint: disable=E1101, C0330
# E1101 = Module X has no Y member
"""
GUI components for PyBank.
Created on Tue May 12 13:21:37 2015
Usage:
gui.py
Options:
-h --help # Show this screen.
--version # Show version.
"""
# ---------------------------------------------------------------------------
### Imports
# ---------------------------------------------------------------------------
# Standard Library
import logging
import decimal
from enum import Enum
# Third Party
import wx
import wx.grid
import wx.lib.plot as wxplot
import numpy as np
try:
from agw import foldpanelbar as fpb
except ImportError:
import wx.lib.agw.foldpanelbar as fpb
# Package / Application
from pybank import (__project_name__,
__version__,
)
from . import utils
from . import crypto
from . import orm
from . import queries
# ---------------------------------------------------------------------------
### Module Constants
# ---------------------------------------------------------------------------
LEDGER_COLOR_ROW_NEW = wx.Colour(240, 240, 240, 255)
LEDGER_COLOR_ROW_ODD = wx.Colour(255, 255, 255, 255)
LEDGER_COLOR_ROW_EVEN = wx.Colour(255, 255, 204, 255)
LEDGER_COLOR_VALUE_NEGATIVE = wx.Colour(255, 0, 0, 255)
LEDGER_COLOR_VALUE_POSITIVE = wx.Colour(0, 0, 0, 255)
DATABASE = "test_database.db"
TITLE_TEXT = "{} v{}".format(__project_name__, __version__)
# ---------------------------------------------------------------------------
### Main GUI
# ---------------------------------------------------------------------------
class MainApp(object):
""" Main App """
def __init__(self):
self.app = wx.App()
self.frame = MainFrame(TITLE_TEXT, (1250, 700))
self.frame.Show()
# Having the GridCellChoiceEditor seems to cause a wxAssertionError
# but doesn't appear to do anything bad, so I'm just handling it here
# because it's annoying.
try:
self.app.MainLoop()
except wx.wxAssertionError:
pass
class MainFrame(wx.Frame):
""" Main Window of the PyBank program """
def __init__(self, title, size):
wx.Frame.__init__(self, None, wx.ID_ANY, title=title, size=size)
# Set up some timers for backup and write-to-db
# self.write_db_timer = wx.Timer(self)
# self.write_db_timer.Start(1000)
# logging.debug("Write-to-database timer started")
self.encryption_timer = wx.Timer(self)
self.encryption_timer.Start(5 * 60 * 1000) # Every 5 minutes
logging.info("Encryption timer started")
self._init_ui()
def _init_ui(self):
""" Initi UI Components """
# Create the menu bar and bind events
self.menu_bar = wx.MenuBar()
self._create_menus()
self._bind_events()
# Initialize default states
self._set_defaults()
# Set the MenuBar and create a status bar
self.SetMenuBar(self.menu_bar)
self.CreateStatusBar()
self.panel = MainPanel(self)
self.ledger = self.panel.panel2.ledger_page.ledger
def _create_menus(self):
""" Create each menu for the menu bar """
# TODO: Switch to wx.RibbonBar? It looks pretty nice.
self._create_file_menu()
self._create_edit_menu()
self._create_view_menu()
self._create_tools_menu()
self._create_options_menu()
self._create_help_menu()
def _create_file_menu(self):
"""
Creates the File menu.
wxIDs:
------
+ 101: New
+ 102: Open
+ 106: Save
+ 104: Close
+ 105: Open OFX File
+ 103: Exit
"""
# Create the menu and items
self.mfile = wx.Menu()
self.mf_new = wx.MenuItem(self.mfile, 101, "&New\tCtrl+N",
"Create a new PyBank file")
self.mf_open = wx.MenuItem(self.mfile, 102, "&Open\tCtrl+O",
"Open a PyBank file")
self.mf_save = wx.MenuItem(self.mfile, 106, "&Save\tCtrl+S",
"Save the current PyBank file")
self.mf_close = wx.MenuItem(self.mfile, 104, "&Close\tCtrl+W",
"Close the current PyBank file.")
self.mf_open_ofx = wx.MenuItem(self.mfile, 105, "Open OFX File",
"Open an existing OFX and append to the current ledger")
self.mf_exit = wx.MenuItem(self.mfile, 103, "&Exit\tCtrl+Q",
"Exit the application")
# Add menu items to the menu
self.mfile.Append(self.mf_new)
self.mfile.Append(self.mf_open)
self.mfile.Append(self.mf_save)
self.mfile.Append(self.mf_close)
self.mfile.AppendSeparator()
self.mfile.Append(self.mf_open_ofx)
self.mfile.AppendSeparator()
self.mfile.Append(self.mf_exit)
self.menu_bar.Append(self.mfile, "&File")
def _create_edit_menu(self):
"""
Creates the Edit menu
wxIDs:
------
+ 201: ???
+ 202: ???
"""
# Create the menu and items
self.medit = wx.Menu()
self.me_temp = wx.MenuItem(self.medit, 201, "&Temp", "TempItem")
# Add menu items to the menu
self.medit.Append(self.me_temp)
self.menu_bar.Append(self.medit, "&Edit")
def _create_view_menu(self):
"""
Creates the View menu.
wxIDs:
------
+ 301: ???
+ 302: ???
"""
# Create the menu and items
self.mview = wx.Menu()
self.mv_l = wx.Menu()
self.mv_temp = wx.MenuItem(self.mview, 301, "&Temp", "TempItem")
# TODO: There's gotta be a way to auto-populate this menu from the tbl
self.mv_l_date = wx.MenuItem(self.mv_l, 30201, "Date", "",
wx.ITEM_CHECK)
self.mv_l_enter_date = wx.MenuItem(self.mv_l, 30202, "Enter Date", "",
wx.ITEM_CHECK)
self.mv_l_checknum = wx.MenuItem(self.mv_l, 30203, "Check Number", "",
wx.ITEM_CHECK)
self.mv_l_payee = wx.MenuItem(self.mv_l, 30204, "Payee", "",
wx.ITEM_CHECK)
self.mv_l_dlpayee = wx.MenuItem(self.mv_l, 30205, "Downloaded Payee",
"", wx.ITEM_CHECK)
self.mv_l_memo = wx.MenuItem(self.mv_l, 30206, "Memo", "",
wx.ITEM_CHECK)
self.mv_l_cat = wx.MenuItem(self.mv_l, 30207, "Category", "",
wx.ITEM_CHECK)
self.mv_l_label = wx.MenuItem(self.mv_l, 30208, "Label",
"", wx.ITEM_CHECK)
self.mv_l_amount = wx.MenuItem(self.mv_l, 30209, "Amount", "",
wx.ITEM_CHECK)
self.mv_l_balance = wx.MenuItem(self.mv_l, 30210, "Balance", "",
wx.ITEM_CHECK)
# Add menu items to the menu
self.mv_l.Append(self.mv_l_date)
self.mv_l.Append(self.mv_l_enter_date)
self.mv_l.Append(self.mv_l_checknum)
self.mv_l.Append(self.mv_l_payee)
self.mv_l.Append(self.mv_l_dlpayee)
self.mv_l.Append(self.mv_l_memo)
self.mv_l.Append(self.mv_l_cat)
self.mv_l.Append(self.mv_l_label)
self.mv_l.Append(self.mv_l_amount)
self.mv_l.Append(self.mv_l_balance)
self.mview.Append(self.mv_temp)
self.mview.Append(302, "Ledger Columns", self.mv_l)
self.menu_bar.Append(self.mview, "&View")
def _create_tools_menu(self):
"""
"""
# Create the menu and items
self.mtools = wx.Menu()
self.mt_accounts = wx.MenuItem(self.mtools, 401, "&Accounts",
"Add and modify accounts")
# Add menu items to the menu
self.mtools.Append(self.mt_accounts)
self.menu_bar.Append(self.mtools, "&Tools")
def _create_options_menu(self):
"""
"""
# Create the menu and items
self.mopts = wx.Menu()
self.mo_accounts = wx.MenuItem(self.mopts, 501, "&Temp",
"No idea yet")
# Add menu items to the menu
self.mopts.Append(self.mo_accounts)
self.menu_bar.Append(self.mopts, "&Options")
def _create_help_menu(self):
"""
"""
# Create the menu and items
self.mhelp = wx.Menu()
self.mh_about = wx.MenuItem(self.mhelp, 601, "&About",
"Infomation about PyBank")
# Add menu items to the menu
self.mhelp.Append(self.mh_about)
self.menu_bar.Append(self.mhelp, "&Help")
def _set_defaults(self):
"""
"""
self.mv_l_date.Check(True)
self.mv_l_enter_date.Check(False)
self.mv_l_checknum.Check(True)
self.mv_l_payee.Check(True)
self.mv_l_dlpayee.Check(True)
self.mv_l_memo.Check(False)
self.mv_l_cat.Check(True)
self.mv_l_label.Check(True)
self.mv_l_amount.Check(True)
self.mv_l_balance.Check(True)
def _bind_events(self):
""" Bind all initial events """
# File Menu
self.Bind(wx.EVT_MENU, self._on_new, id=101)
self.Bind(wx.EVT_MENU, self._on_open, id=102)
self.Bind(wx.EVT_MENU, self._on_close, id=104)
self.Bind(wx.EVT_MENU, self._on_quit, id=103)
self.Bind(wx.EVT_MENU, self._on_open_ofx, id=105)
self.Bind(wx.EVT_MENU, self._on_save, id=106)
# Edit Menu
# self.Bind(wx.EVT_MENU, self._on_edit_menu1)
# View Menu
# self.Bind(wx.EVT_MENU, self._nothing)
self.Bind(wx.EVT_MENU, self._on_toggle_ledger_col, id=30201, id2=30210)
# Tools Menu
# Options Menu
# Help Menu
# Timers
self.Bind(wx.EVT_TIMER, self._on_encryption_timer, self.encryption_timer)
# self.Bind(wx.EVT_TIMER, self._on_write_db_timer , self.write_db_timer)
# Other
self.Bind(wx.EVT_CLOSE, self._on_close_event, self)
def _on_close_event(self, event):
"""
Catch all of the close events, including those from the window
manager (such as the upper-right "X" button and Alt-F4) as well
as the close events that my program sends like from self._on_quit().
This will handle things like saving any remaining changes, backing
up data, and confirming close.
"""
logging.debug("close event fired!")
save_pybank_file()
self.Destroy()
def _on_quit(self, event):
""" Execute quit actions """
logging.debug("on quit")
self.Close(force=True)
def _on_open(self, event):
""" Open a file """
logging.debug("on open")
dialog = wx.FileDialog(self,
"Choose a PyBank datatbase file to open",
".",
"",
"PyBank database (*.pybank)|*.pybank",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
)
if dialog.ShowModal() == wx.ID_CANCEL:
return
path = dialog.GetPath()
logging.info("Opening file: `{}`".format(path))
self.ledger._setup()
self.ledger.table._pull_data()
self.ledger._format_table()
def _on_open_ofx(self, event):
logging.debug("on open ofx")
dialog = wx.FileDialog(self,
"Choose a OFX file to open",
".",
"",
"OFX files (*.ofx)|*.ofx",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
)
if dialog.ShowModal() == wx.ID_CANCEL:
return
path = dialog.GetPath()
logging.info("Opening OFX file: `{}`".format(path))
def _on_save(self, event):
""" Saves the current pybank file """
logging.info("Saving file...")
save_pybank_file()
def _on_new(self, event):
""" Create a new file """
logging.debug("on new")
logging.info("Creating new file")
logging.error("(Not yet implemented)")
def _on_close(self, event):
""" Create a new file """
logging.debug("on close")
logging.info("Closing current file.")
self.ledger.table.data = [[]]
self.ledger.ClearGrid()
self.ledger._format_table()
def _on_toggle_ledger_col(self, event):
""" Toggles a ledger column on or off """
col_num = event.Id - 30200  # Ledger column menu items were created
# with IDs 30201-30210 (see _create_view_menu), so subtracting 30200
# maps each menu item to its grid column index (1-10). Column 0 is
# the always-shown "#" column, which has no menu entry.
new_val = event.IsChecked()
self.panel.panel2.ledger_page.ledger.SetColumnShown(col_num, new_val)
def _on_encryption_timer(self, event):
logging.info("Encryption Timer event start")
save_pybank_file()
def _on_write_db_timer(self, event):
logging.debug("Write_db_timer event!")
class MainPanel(wx.Panel):
""" Main Panel; contains all other panels and elements """
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.parent = parent
self._init_ui()
def _init_ui(self):
""" Initialize the UI Components """
# Create items
self.splitter = wx.SplitterWindow(self, wx.ID_ANY,
style=wx.SP_LIVE_UPDATE,
)
# self.panel1 = AccountListTree(self.splitter)
self.panel1 = AccountList(self.splitter)
self.panel2 = MainNotebook(self.splitter)
# Set up the splitter attributes
self.splitter.SetMinimumPaneSize(100)
self.splitter.SplitVertically(self.panel1, self.panel2, 200)
# Create layout manager, add items, and set sizer.
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.splitter, 1, wx.EXPAND)
self.SetSizer(self.hbox)
class SamplePanel(wx.Panel):
"""
Just a simple test window to put into the splitter.
"""
def __init__(self, parent, colour, label):
wx.Panel.__init__(self, parent, style=wx.BORDER_SUNKEN)
self.SetBackgroundColour(colour)
wx.StaticText(self, -1, label, (5, 5))
class SampleLogPanel(wx.Panel):
""" Simple test panel to put into the notebook """
def __init__(self, parent, colour, label):
wx.Panel.__init__(self, parent, style=wx.BORDER_SUNKEN)
self.parent = parent
self.SetBackgroundColour(colour)
title = wx.StaticText(self, -1, label)
log_style = (wx.TE_MULTILINE
| wx.TE_READONLY
| wx.HSCROLL
)
log_font = wx.Font(10,
family=wx.MODERN,
style=wx.NORMAL,
weight=wx.NORMAL,
underline=False,
faceName='Consolas',
)
self.log = wx.TextCtrl(self, -1, "", style=log_style)
self.log.SetFont(log_font)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(title, 0, wx.EXPAND)
vbox.Add(self.log, 1, wx.EXPAND)
self.SetSizer(vbox)
class SamplePlotPanel(wx.Panel):
"""
Example plotting using the built-in wx.lib.plot (wxplot).
Keeps things smaller because it doesn't require numpy or matplotlib,
but means more coding and looks a little rougher.
"""
def __init__(self, parent, colour, label):
wx.Panel.__init__(self, parent, style=wx.BORDER_SUNKEN)
self.SetBackgroundColour(colour)
title = wx.StaticText(self, -1, label, (5, 5))
self.fake_x_data = [1, 2, 3, 4, 5, 6, 7]
self.fake_y_data = [15, 13.6, 18.8, 12, 2, -6, 25]
self.client = wxplot.PlotCanvas(self, size=(400, 300))
# Then set up how we're presenting the data. Lines? Point? Color?
tdata = list(zip(self.fake_x_data, self.fake_y_data))
line = wxplot.PolyLine(tdata,
colour='red',
width=2,
drawstyle='steps-post',
)
data = wxplot.PolyMarker(tdata,
legend="Green Line",
colour='red',
width=4,
size=1,
marker='square',
# style=wx.PENSTYLE_SOLID,
)
plot = wxplot.PlotGraphics([line, data],
title="Title",
xLabel="X label",
yLabel="Monies",
)
self.plot = plot
self.client.GridPen = wx.Pen(wx.Colour(230, 230, 230, 255))
self.client.EnableGrid = True
self.client.Draw(plot)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(title, 0)
self.sizer.Add(self.client, 0)
self.SetSizer(self.sizer)
class NotebookPages(Enum):
"""
Contains the notebook pages number-name link
"""
summary = 0
ledger = 1
plots = 2
log = 3
class MainNotebook(wx.Notebook):
"""
Notebook container for most everything.
Contains tabs for:
+ Summary
+ Ledger
+ Scheduled Transactions
+ Projected Balances
And perhaps other stuff.
"""
def __init__(self, parent):
wx.Notebook.__init__(self, parent)
self.parent = parent
self._init_ui()
def _init_ui(self):
""" Initialize UI components """
summary_page = SamplePanel(self, "yellow", "Summary Page")
self.AddPage(summary_page, "Summary")
self.ledger_page = LedgerPanel(self)
self.AddPage(self.ledger_page, "Ledger")
# plots_page = SamplePlotPanel(self, "cyan", "Plotting (wx.lib.plot)")
# self.AddPage(plots_page, "Even more stuff")
self.log_page = SampleLogPanel(self, "pink", "Sample logging page")
self.AddPage(self.log_page, "Logs")
utils._init_logging(self.log_page.log, logging.WARNING)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self._on_page_changed)
# self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self._on_page_changing)
# Show the ledger at start (SetSelection generates events)
self.SetSelection(NotebookPages.ledger.value)
def _on_page_changed(self, event):
"""
Event fires *after* the page is changed.
old is the previous page
new is the page we changed to
sel is which, if any, page was selected.
For example, if I use SetSelection(2), sel might be 1 or 0.
"""
old = event.GetOldSelection()
new = event.GetSelection()
sel = self.GetSelection()
log_txt = "Page Changed: old: {}, new: {}, sel: {}"
logging.debug(log_txt.format(old, new, sel))
if new == NotebookPages.plots.value:
self._change_to_plots()
def _on_page_changing(self, event):
"""
Event fires *before* the page is changed.
Note how old, new, and sel are all the same.
This event can be vetoed.
"""
old = event.GetOldSelection()
new = event.GetSelection()
sel = self.GetSelection()
log_txt = "Page Changing: old: {}, new: {}, sel: {}"
logging.debug(log_txt.format(old, new, sel))
def _change_to_plots(self):
"""
Executes when changing to a plot page.
Fires the plot.draw() method.
"""
# TODO: Change this data grab to sql.
d = self.ledger_page.ledger.GetTable().data
x = np.arange(1, len(d) + 1)
# y = [random.uniform(-1, 1) + _x for _x in x]
y = np.array([x[10] for x in d], dtype=np.float)
client = self.GetPage(NotebookPages.plots.value).client
client.Clear() # XXX: Not working (panel not updating?)
# plot.draw(x, y, 'r')
tdata = list(zip(x, y))
line = wxplot.PolyLine(tdata,
colour='red',
width=2,
drawstyle='steps-post',
)
data = wxplot.PolyMarker(tdata,
legend="Green Line",
colour='red',
width=4,
size=1,
marker='square',
# style=wx.PENSTYLE_SOLID,
)
plot = wxplot.PlotGraphics([line, data],
title="Title",
xLabel="X label",
yLabel="Monies",
)
client.GridPen = wx.Pen(wx.Colour(230, 230, 230, 255))
client.EnableGrid = True
client.Draw(plot)
# pareto = self.GetPage(NotebookPages.plots.value).pareto
# pareto.clear()
# y = np.array([x[4] for x in d], dtype=np.str)
# pareto.draw(y)
class LedgerPanel(wx.Panel):
"""
The transaction ledger
Should contain the following columns:
+ Transaction Date
+ Entered Date
+ CheckNum
+ Payee DisplayName
+ Downloaded Payee
+ Memo
+ Category
+ Label
+ Amount # do I want separate payment/income columns? probably red/black
+ Balance
"""
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.parent = parent
self._init_ui()
def _init_ui(self):
""" Initialize UI components """
self.header_bar = LedgerHeaderBar(self)
self.ledger = LedgerGrid(self)
self.summary_bar = LedgerSummaryBar(self)
self.summary_bar._update()
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.header_bar, 0, wx.EXPAND)
self.vbox.Add(self.ledger, 1, wx.EXPAND)
self.vbox.Add(self.summary_bar, 0, wx.EXPAND)
self.SetSizer(self.vbox)
class LedgerGridBaseTable(wx.grid.GridTableBase):
"""
"""
columns = utils.LedgerCols
# -----------------------------------------------------------------------
### Magic Methods
# -----------------------------------------------------------------------
def __init__(self, parent):
wx.grid.GridTableBase.__init__(self)
self.parent = parent
self.column_labels, self.col_types = self._set_columns()
self.data = []
# TODO: move amount and balance to numpy arrays?
# flag for when the data has been changed with respect to the database
self.data_is_modified = False
self.row_is_new = False
# pull the category strings and create the choicelist.
self.cat_data = [(row.category_id, row.name, row.parent)
for row in queries.query_category()]
self.choicelist = utils.build_cat_strings(self.cat_data)
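# (This choicelist is what LedgerGrid later feeds to its
# GridCellChoiceEditor, so the Category column offers these
# strings as its drop-down options.)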
self._pull_data()
# -----------------------------------------------------------------------
### Override Methods
# -----------------------------------------------------------------------
def GetNumberRows(self):
# TODO: Change to database query?
# rows = pbsql.db_query_single(DATABASE,
# "SELECT COUNT(*) FROM v_ledger_0")[0]
rows = len(self.data)
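# +1 for the blank "new transaction" row kept at the bottom of the
# grid (the row colored LEDGER_COLOR_ROW_NEW in _color_rows).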
return rows + 1
def GetNumberCols(self):
try:
return len(self.data[0])
except IndexError:
return len(self.columns)
def IsEmptyCell(self, row, column):
# logging.debug("IsEmptyCell(row={}, col={})".format(row, column))
try:
return not self.data[row][column]
except IndexError:
return True
def GetValue(self, row, col):
"""
Get the cell value from the data or database.
Override Method
Is called on every cell at init and then again when cells are
clicked.
"""
return str(self._get_value(row, col))
def SetValue(self, row, col, value):
"""
Sets the value of a cell.
Override Method.
"""
self._set_value(row, col, value)
def GetColLabelValue(self, column):
return self.column_labels[column]
def GetTypeName(self, row, column):
return self.col_types[column]
def CanGetValueAs(self, row, column, type_name):
column_type = self.col_types[column].split(":")[0]
return type_name == column_type
def CanSetValueAs(self, row, column, type_name):
return self.CanGetValueAs(row, column, type_name)
# -----------------------------------------------------------------------
### Private Methods
# -----------------------------------------------------------------------
def _set_columns(self):
"""
Sets the columns for the ledger.
"""
# TODO: make column order depend only on the DB view
labels = [_i.col_name for _i in self.columns]
types = [_i.col_type for _i in self.columns]
for _i, title in enumerate(labels):
self.SetColLabelValue(_i, title)
return (labels, types)
# def _calc_balance(self):
# balance = decimal.Decimal('200') # Starting balance
#
#
# for row in self.data:
# balance += decimal.Decimal(row[-2])
# row[-1
@utils.logged
def _pull_data(self):
# grab the table data from the database
self.data = []
# convert the query results to something usable by the grid
# Also calculate the running balance for the view.
# TODO: I hate this - come up with an alternate solution
starting_bal = decimal.Decimal(200)
balance = starting_bal
for row_num, row_data in enumerate(queries.query_ledger_view()):
data_dict = row_data.__dict__
row_values = []
for item in self.columns:
if item.view_name is None:
continue
row_values.append(data_dict[item.view_name])
balance += decimal.Decimal(row_values[-1])
row_values[-1] = str(row_values[-1])
row_values.append(str(balance))
self.data.append(row_values)
# update the summary bar. Need to go to the grandparent.
try:
self.parent.parent.summary_bar._update()
except AttributeError:
# on initialization, the summary_bar object hasn't been created
# yet so we just ignore the error. LedgerPanel._init_ui() takes
# care of updating the summary_bar
pass
# since we just pulled the data, we know that the data hasn't been
# modified yet.
self.data_is_modified = False
def _get_value(self, row, col):
"""
Private logic for the GetValue() override method.
"""
# logging.debug("Getting value of r{}c{}".format(row, column))
try:
value = self.data[row][col]
except IndexError:
return ''
if col == self.columns.category.index:
try:
return utils.build_cat_string(value, self.cat_data)
except TypeError:
return ''
if value is None or value == 'None':
return ''
else:
return str(value)
def _set_value(self, row, col, value):
"""
Updates the data value for a given cell.
Does not attempt to update the database
"""
logging.info("Setting r{}c{} to `{}`".format(row, col, value))
try:
self._update_row(row, col, value)
except IndexError:
# add a new row
logging.info("Update failed. Adding new row")
self._insert_row(row, col, value)
# tell the grid that we've added a row
logging.debug("GRIDTABLE_NOTIFY_ROWS_APPENDED")
action = wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED
msg = wx.grid.GridTableMessage(self, action, 1)
else: # run only if no *unhandled* errors
# tell the grid to display the new values
logging.debug("GRIDTABLE_REQUEST_VIEW_GET_VALUES")
action = wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES
msg = wx.grid.GridTableMessage(self, action)
finally: # always runs
pass
self.GetView().ProcessTableMessage(msg)
self.parent._format_table()
# self.data_is_modified = True
def _send_row(self, row):
"""
Sends a new or modified row to the database
"""
try:
logging.info("Attempting to write data to database")
# TODO: do I want to change the logic for row_is_new?
# I can either leave it as it is now, using the ledger to
# determine if a row is new, or I can change it so that
# I query the database with the row value and call it a
# new row iff it doesn't exist in the database.
# I'll have to think about that a bit.
if self.row_is_new:
queries.insert_ledger(acct=1,
date=None,
enter_date=None,
check_num=None,
amount="123.4",
payee=None,
category=None,
label=None,
memo=None,
fitid=-1,
)
else: # row is not new, but rather was updated
self._update_row(row)
except TypeError:
# TODO: more exact error conditions
logging.exception("Error writing to database!", stack_info=True)
else:
logging.info("DB write successful.")
logging.debug(orm.session.new)
logging.debug(orm.session.dirty)
orm.session.commit()
self._pull_data()
def _update_row(self, row, col, value):
"""
Updates a row in the ledger
"""
logging.info("Trying Updating row %s", row)
prev = list(self.data[row]) # need list() to force copy
logging.info("Previous: %s", prev)
# Make sure we write the category ID instead of the string
if col == self.columns.category.index:
try:
# +1 because lists are 0-indexed and my table is 1-indexed
value = self.choicelist.index(value) + 1
except ValueError:
# value not found. For now, lets just use None
# TODO: figure out how I want to handle this.
value = None
self.row_is_new = False
self.data[row][col] = value
new = self.data[row]
logging.info("New.....: %s", self.data[row])
trans_id = self.data[row][self.columns.trans_id.index]
# create a dict of the columns:values
update_dict = {}
for i, (p, n) in enumerate(zip(prev, new)):
if p != n:
col_name = [x.view_name for x in self.columns if x.index == i][0]
update_dict[col_name] = n
# make sure we don't try and update the trans_id
try:
del update_dict[self.columns.trans_id.col_name]
except KeyError:
pass
# XXX: Hack while I figure out how to handle some things
# if we have a "Category" col, we have to rename it to "category_id"
if self.columns.category.view_name in update_dict.keys():
temp = update_dict[self.columns.category.view_name]
del update_dict[self.columns.category.view_name]
update_dict['category_id'] = temp
logging.info("Update Dict: %s", update_dict)
if not update_dict:
# no values to update, so just return.
return
try:
queries.update_transaction(trans_id, update_dict)
except TypeError:
# TODO: more exact error conditions
logging.exception("Error writing to database!", stack_info=True)
else:
logging.info("DB write successful.")
logging.debug("New Items: {}".format(orm.session.new))
logging.debug("Dirty Items: {}".format(orm.session.dirty))
orm.session.commit()
self._pull_data()
def _insert_row(self, row, col, value):
"""
Inserts a new row into the ledger orm table.
"""
logging.info("Inserting row %s", row)
self.row_is_new = True
self.data.append([None] * self.GetNumberCols())
self.data[row][0] = "-1"
# Make sure we write the category ID instead of the string
if col == self.columns.category.index:
try:
# +1 because lists are 0-indexed and my table is 1-indexed
value = self.choicelist.index(value) + 1
except ValueError:
# value not found. For now, lets just use None
# TODO: figure out how I want to handle this.
value = None
logging.info("New Value: %s", self.data[row])
# create a dict of the columns:values
insert_dict = {}
col_name = [x.view_name for x in self.columns if x.index == col][0]
insert_dict[col_name] = value
# make sure we don't try and update the trans_id
try:
del insert_dict[self.columns.trans_id.view_name]
except KeyError:
pass
# make sure that we have an "amount" value
if self.columns.amount.view_name not in insert_dict.keys():
insert_dict[self.columns.amount.view_name] = "0"
# XXX: Hack while I figure out how to handle some things
# if we have a "Category" col, we have to rename it to "category_id"
if self.columns.category.view_name in insert_dict.keys():
temp = insert_dict[self.columns.category.view_name]
del insert_dict[self.columns.category.view_name]
insert_dict['category_id'] = temp
logging.info("Update Dict: %s", insert_dict)
if not insert_dict:
# no values to update, so just return.
return
try:
queries.insert_transaction(insert_dict)
except TypeError:
# TODO: more exact error conditions
logging.exception("Error writing to database!", stack_info=True)
else:
logging.info("DB write successful.")
logging.debug("New Items: {}".format(orm.session.new))
logging.debug("Dirty Items: {}".format(orm.session.dirty))
orm.session.commit()
self._pull_data()
class LedgerGrid(wx.grid.Grid):
"""
"""
def __init__(self, parent):
logging.info("Initializing LedgerGrid")
wx.grid.Grid.__init__(self, parent, wx.ID_ANY)
self.parent = parent
self._setup()
choiceEditor = wx.grid.GridCellChoiceEditor(self.table.choicelist,
allowOthers=True)
for row in range(self.GetNumberRows()):
self.SetCellEditor(row, 7, choiceEditor)
@utils.logged
def _setup(self):
logging.info("Running LedgerGrid._setup()")
self.table = LedgerGridBaseTable(self)
self.SetTable(self.table, takeOwnership=True)
self.SetRowLabelSize(30)
self.SetMargins(0, 0)
self.AutoSizeColumns(True)
self._bind_events()
self._format_table()
@utils.logged
def _bind_events(self):
logging.info("Binding events for LedgerGrid")
self.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK,
self._on_left_dclick,
self)
self.Bind(wx.grid.EVT_GRID_CELL_RIGHT_CLICK,
self._on_right_click,
self)
self.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK,
self._on_left_click,
self)
self.Bind(wx.grid.EVT_GRID_CELL_CHANGED,
self._on_grid_cell_changed,
self)
self.Bind(wx.grid.EVT_GRID_CELL_CHANGING,
self._on_grid_cell_changing,
self)
@utils.logged
def _format_table(self):
""" Formats all table properties """
logging.info("Formatting table")
self._color_rows()
self._align_columns()
self._color_dollars()
@utils.logged
def _color_rows(self):
""" Color alternating rows and color the last row light grey """
logging.info("Coloring rows")
num_rows = self.GetNumberRows()
for row in range(num_rows):
attr = wx.grid.GridCellAttr()
if row == num_rows - 1:
attr.SetBackgroundColour(LEDGER_COLOR_ROW_NEW)
elif row % 2 == 0:
attr.SetBackgroundColour(LEDGER_COLOR_ROW_EVEN)
else:
attr.SetBackgroundColour(LEDGER_COLOR_ROW_ODD)
self.SetRowAttr(row, attr)
def _align_columns(self):
""" Sets the alignment for each column """
logging.info("Setting column alignment")
num_cols = self.GetNumberCols()
for column in range(num_cols):
attr = wx.grid.GridCellAttr()
if column in (3, 9, 10):
attr.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTER)
else:
attr.SetAlignment(wx.ALIGN_LEFT, wx.ALIGN_CENTER)
self.SetColAttr(column, attr)
def _color_dollars(self):
""" Colors negative amounts and balances as red """
logging.info("Coloring negative balances")
num_rows = self.GetNumberRows() - 1
for row in range(num_rows):
for col in (9, 10):
try:
val = float(self.GetCellValue(row, col))
except ValueError:
logging.warning("Unable to convert r%sc%s to float. Assuming 0 for row coloring.", row, col)
val = 0
if val < 0:
self.SetCellTextColour(row, col,
LEDGER_COLOR_VALUE_NEGATIVE)
else:
self.SetCellTextColour(row, col,
LEDGER_COLOR_VALUE_POSITIVE)
def _on_left_dclick(self, event):
# TODO: get cell coord from event
rc = (event.GetRow(), event.GetCol())
logging.debug("double left click on cell {}".format(rc))
if self.CanEnableCellControl():
self.EnableCellEditControl()
def _on_right_click(self, event):
rc = (event.GetRow(), event.GetCol())
logging.debug("right-click detected on cell {}".format(rc))
def _on_left_click(self, event):
"""
Fires when a left-click happens.
1. Record the current cursor position
2. Move the grid cursor to the new cell (default behavior of this
event)
3. If there is modified data, attempt to add it to the database.
"""
logging.debug("Left-click detected")
previous_rc = (self.GetGridCursorRow(), self.GetGridCursorCol())
new_rc = (event.GetRow(), event.GetCol())
# Don't do anything if we haven't moved grid location
if previous_rc == new_rc:
logging.debug("Cursor didn't move.")
return
self.SetGridCursor(*new_rc)
#
# logging.debug("previous_rc = {}".format(previous_rc))
# logging.debug("new_rc = {}".format(new_rc))
#
# if self.table.data_is_modified and new_rc[0] != previous_rc[0]:
# # TODO: Fill this out
# try:
# logging.info("Attempting to write data to database")
# queries.insert_ledger(acct=1,
# date=None,
# enter_date=None,
# check_num=None,
# amount="123.4",
# payee=None,
# category=None,
# label=None,
# memo=None,
# fitid=-1,
# )
# except TypeError:
# logging.exception("Error writing to database!", stack_info=True)
# else:
# logging.info("DB write successful.")
# logging.debug(orm.session.new)
# logging.debug(orm.session.dirty)
# orm.session.commit()
# self.parent.summary_bar._update()
# self.table.data_is_modified = False
def _on_grid_cell_changed(self, event):
""" Fires after a cell's data has changed. """
logging.debug("grid cell changed")
logging.debug("{}".format(event))
def _on_grid_cell_changing(self, event):
""" Fires before a cell's data is changed. Can be vetoed. """
logging.debug("grid cell about to change")
logging.debug("{}".format(event))
if event.GetCol() == 3: # CheckNum column
try:
int(event.GetString())
except ValueError:
log_msg = "Unable to cast '{}' to int. Reverting."
logging.warning(log_msg.format(event.GetString()))
event.Veto()
elif event.GetCol() == 10: # Balance Column
logging.warning("Can't change the 'Balance' column")
event.Veto()
elif event.GetCol() == 9: # Amount Column
try:
decimal.Decimal(event.GetString())
except decimal.InvalidOperation:
log_msg = "Unable to cast '{}' to type `Decimal`. Reverting."
logging.warning(log_msg.format(event.GetString()))
event.Veto()
else:
pass
# TODO: Add some way to highlight which account is active. A button, mayhaps?
class AccountList(wx.Panel):
""" List of the accounts """
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.parent = parent
self._account_list = None
self._account_list = ["acct1", "acct2", "acct3", "acct4"]
self._account_groups = ["Group1", "Group2"]
self._init_ui()
def _init_ui(self):
""" Initialize UI components """
# Set style info for the FoldPanelBar (CaptionBars)
self.style = fpb.CaptionBarStyle()
self.style.SetCaptionStyle(fpb.CAPTIONBAR_GRADIENT_H)
color1 = wx.Colour((0, 255, 255))
color2 = wx.Colour((255, 255, 255))
self.style.SetFirstColour(color1)
self.style.SetSecondColour(color2)
# Create items
titlefont = wx.Font(16,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD,
)
self.title = wx.StaticText(self, wx.ID_ANY,
label="Accounts",
size=(-1, -1),
style=wx.ALIGN_CENTER,
)
self.title.SetFont(titlefont)
# Create the entire FoldPanelBar
self._bar = fpb.FoldPanelBar(self, wx.ID_ANY, size=(500, -1))
# Create the 1st fold panel.
self.fp_1 = self._bar.AddFoldPanel("caption", collapsed=False,
cbstyle=self.style)
# Create items that belong to 1st fold panel
# Note that items must be created then added immediately.
text1 = wx.StaticText(self.fp_1, wx.ID_ANY, label="Hello")
self._bar.AddFoldPanelWindow(self.fp_1, text1)
text2 = wx.StaticText(self.fp_1, wx.ID_ANY, label="Goodbye")
self._bar.AddFoldPanelWindow(self.fp_1, text2)
# Create 2nd fold panel
self.fp_2 = self._bar.AddFoldPanel("2nd panel", collapsed=False,
cbstyle=self.style)
# create items to add to the 2nd fold panel
text3 = wx.StaticText(self.fp_2, wx.ID_ANY, label="Panel2 text3")
self._bar.AddFoldPanelWindow(self.fp_2, text3)
text4 = wx.StaticText(self.fp_2, wx.ID_ANY, label="Panel2 text4")
self._bar.AddFoldPanelWindow(self.fp_2, text4)
# Create the layout managers, add items, and set the sizer
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self._bar, 0, wx.EXPAND)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.title, 0, wx.EXPAND)
self.vbox.Add((-1, 3), 0, wx.EXPAND)
self.vbox.Add(self.hbox, 1, wx.EXPAND)
self.SetSizer(self.vbox)
class AccountListTree(wx.Panel):
"""
"""
def __init__(self, parent):
wx.Panel.__init__(self, parent, wx.ID_ANY)
# wx.gizmos.TreeListCtrl.__init__(self, parent, wx.ID_ANY)
self._init_ui()
def _init_ui(self):
tc_style = (wx.TR_DEFAULT_STYLE
| wx.TR_HAS_BUTTONS
# | wx.TR_HAS_VARIABLE_ROW_HEIGHT
# | wx.TR_TWIST_BUTTONS
# | wx.TR_ROW_LINES
# | wx.TR_COLUMN_LINES
| wx.TR_FULL_ROW_HIGHLIGHT
)
# Create items
titlefont = wx.Font(16,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD,
)
self.title = wx.StaticText(self, wx.ID_ANY,
label="Accounts",
size=(-1, -1),
style=wx.ALIGN_CENTER,
)
self.title.SetFont(titlefont)
self.tree = wx.TreeCtrl(self,
wx.ID_ANY,
style=tc_style,
)
self.root = self.tree.AddRoot("Accounts")
# self.tree.SetItemText(self.root, "col1 root")
group1 = self.tree.AppendItem(self.root, "Group1")
group2 = self.tree.AppendItem(self.root, "Group2")
acct1 = self.tree.AppendItem(group1, "acct1")
acct2 = self.tree.AppendItem(group1, "acct2")
acct3 = self.tree.AppendItem(group2, "acct3")
acct4 = self.tree.AppendItem(group2, "acct4")
# self.tree.SetItemText(child, "col1")
# self.tree.Expand(self.root)
self.tree.ExpandAll()
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.title, 0, wx.EXPAND)
self.vbox.Add((-1, 3), 0, wx.EXPAND)
self.vbox.Add(self.tree, 1, wx.EXPAND)
self.SetSizer(self.vbox)
class LedgerSummaryBar(wx.Panel):
""" The legder summary bar """
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.parent = parent
# TODO: Use decimal.Decimal types for balances
self._online_bal = decimal.Decimal("0.00") # online balance
self._avail_bal = decimal.Decimal("0.00") # online available balance
self._curr_bal = decimal.Decimal("0.00") # current balance
self._num_trans = 0 # number of transactions
# can't fill with spaces because the text isn't fixed width and
# I haven't set the wx.StaticText object to be fixed width.
self._trans_fmt = "{:0>6} Transactions"
self._trans_text = self._trans_fmt.format(self._num_trans)
self._online_fmt = "Online Balance: {:<16s}"
self._online_text = self._online_fmt.format(utils.moneyfmt(self._online_bal))
self._avail_fmt = "Avilable Balance: {:<16s}"
self._avail_text = self._avail_fmt.format(utils.moneyfmt(self._avail_bal))
self._curr_fmt = "Current Balance: {:<16s}"
self._curr_text = self._curr_fmt.format(utils.moneyfmt(self._curr_bal))
self._init_ui()
def _init_ui(self):
""" Initialize UI components """
# Create various items
# TODO: These displays can be instances of the same class
self._num_trans_display = wx.StaticText(self, wx.ID_ANY,
label=self._trans_text,
size=(-1, -1),
)
self._online_display = wx.StaticText(self, wx.ID_ANY,
label=self._online_text,
size=(-1, -1),
)
self._avail_display = wx.StaticText(self, wx.ID_ANY,
label=self._avail_text,
size=(-1, -1),
)
self._curr_display = wx.StaticText(self, wx.ID_ANY,
label=self._curr_text,
size=(-1, -1),
)
# Create layout managers and add items
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self._num_trans_display, 0, wx.EXPAND)
self.hbox.Add((30, -1), 1, wx.EXPAND)
self.hbox.Add(self._online_display, 0, wx.EXPAND)
self.hbox.Add((30, -1), 0, wx.EXPAND)
self.hbox.Add(self._avail_display, 0, wx.EXPAND)
self.hbox.Add((30, -1), 0, wx.EXPAND)
self.hbox.Add(self._curr_display, 0, wx.EXPAND)
self.SetSizer(self.hbox)
@utils.logged
def _update(self):
""" Updates the ledger summary """
logging.info("updating summary bar")
data = self.parent.ledger.table.data # Should I not do this because
# of memory usage?
self.online_balance = decimal.Decimal('0.00')
self.num_transactions = len(data)
self.available_balance = decimal.Decimal('0.00')
try:
self.current_balance = decimal.Decimal(data[-1][-1])
except IndexError:
self.current_balance = decimal.Decimal('0.00')
@property
def online_balance(self):
""" Returns the online balance """
return self._online_bal
@online_balance.setter
def online_balance(self, value):
""" Sets the online balance """
self._online_bal = value
self._online_text = self._online_fmt.format(utils.moneyfmt(value))
self._online_display.SetLabel(self._online_text)
@property
def num_transactions(self):
""" Gets the number of transactions """
return self._num_trans
@num_transactions.setter
def num_transactions(self, value):
""" Sets the number of transactions """
self._num_trans = value
self._trans_text = self._trans_fmt.format(value)
self._num_trans_display.SetLabel(self._trans_text)
@property
def available_balance(self):
""" Gets the online available balance """
return self._avail_bal
@available_balance.setter
def available_balance(self, value):
""" Sets the online available balance """
self._avail_bal = value
self._avail_text = self._avail_fmt.format(utils.moneyfmt(value))
self._avail_display.SetLabel(self._avail_text)
@property
def current_balance(self):
""" Gets the current balance """
return self._curr_bal
@current_balance.setter
def current_balance(self, value):
""" Sets the current balance """
self._curr_bal = value
self._curr_text = self._curr_fmt.format(utils.moneyfmt(value))
self._curr_display.SetLabel(self._curr_text)
class LedgerHeaderBar(wx.Panel):
""" The ledger header bar """
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.parent = parent
self._init_ui()
def _init_ui(self):
""" Initialize UI components """
self.title_bar = wx.StaticText(self, wx.ID_ANY,
"<placeholder for account name>",
style=wx.ALIGN_CENTER,
)
vbox = wx.BoxSizer(wx.HORIZONTAL)
vbox.Add(self.title_bar, 1, wx.EXPAND)
self.SetSizer(vbox)
# ---------------------------------------------------------------------------
### Functions
# ---------------------------------------------------------------------------
@utils.logged
def save_pybank_file(filename="test_database.pybank"):
"""
Save the pybank file by dumping the database to a string and then
writing that to an encrypted file.
"""
logging.info("Saving file to '%s'", filename)
# Get the required encryption stuff
key = crypto.get_key()
# dump the memory database directly to an encrypted file.
dump = queries.sqlite_iterdump(orm.engine, orm.session)
dump = "".join(line for line in dump)
dump = dump.encode('utf-8')
crypto.encrypted_write(filename, key, dump)
# ---------------------------------------------------------------------------
### Run module as standalone
# ---------------------------------------------------------------------------
if __name__ == "__main__":
MainApp()
| gpl-2.0 |
ConnorMcMahon/geoinference | python/src/geolocate/gimethods/wheres_wally/method.py | 2 | 20113 | ##
# Copyright (c) 2015, David Jurgens
#
# All rights reserved. See LICENSE file for details
##
import collections
import os.path
import logging
from sklearn import naive_bayes # import GaussianNB
from sklearn import svm
from sklearn import preprocessing
import numpy
import scipy.sparse
import gzip
import time
from collections import defaultdict
from geolocate import GIMethod, GIModel
from geolocate.geocoder import Geocoder
LOGGER = logging.getLogger(os.path.basename(__file__))
class Wheres_Wally_Model(GIModel):
def __init__(self, user_id_to_location):
self.user_id_to_location = user_id_to_location
def infer_post_location(self, post):
if not "user" in post:
return None
user = post["user"]
if not "id" in user:
return None
# If we know this user's location, report their home location
user_id = user['id']
if user_id in self.user_id_to_location:
return self.user_id_to_location[user_id]
else:
return None
def infer_posts_by_user(self, posts):
if len(posts) == 0:
return None
# Each post is assumed originate from the user's home location, so just
# infer the first post's location
home_location = self.infer_post_location(posts[0])
if home_location is None:
return None
# Then fill the array of post locations with the home location
locations = []
for i in range(0, len(posts)):
locations.append(home_location)
return locations
class Wheres_Wally(GIMethod):
def __init__(self):
# Location is represented as a lat/lon geopy Point
self.user_id_to_location = {}
self.geocoder = None;
self.unique_locations = set()
self.id_to_location = {}
# Mappings from feature names to their corresponding indices in a
# feature vector
self.pop_bin_feature_indices = {}
self.reciprocal_feature_indices = {}
self.triad_feature_indices = {}
self.total_num_features = 0
# The SVM classifier and feature vector scaler
self.location_classifier = None
self.location_vector_scaler = None
def train_model(self, settings, dataset, model_dir):
# Initialize the geocoder, which we'll use to resolve location strings.
# We use the default name-to-location mapping unless the user has
# specified otherwise.
if 'location_source' in settings:
self.geocoder = Geocoder(dataset=settings['location_source'])
else:
self.geocoder = Geocoder()
# NOTE: The original paper used the directional friends/followers
# network. However, the paper was tested on a much smaller network
# (9.8M edges), which doesn't scale when including the full network. We
# opt for using the bi-directional networks as these (1) provide a
# stronger signal of social relationships and (2) significantly reduce
# the memory requirement.
LOGGER.debug('Loading mention network')
mention_network = dataset.bi_mention_network()
# This dict will contain a mapping from user ID to an associated home
# location, which is derived either from the location field (as in the
# original paper), from GPS-tagged tweets, or from both
user_to_home_loc = {}
# For each of the users that we have in the network, see if we can
# associate that user with a home location.
all_users = set(mention_network.nodes_iter())
LOGGER.debug('Calculating users with recognizable home location')
num_users_processed = 0
# Keep track of how many times each location occurred. We'll filter
# this down to only the most common locations
location_counts = collections.Counter()
for user_id, home_loc in dataset.user_home_location_iter():
if not user_id in all_users:
continue
# home_loc is a (lat,lon) tuple. While this is accurate, we want to
# coarsen the location data to decrease sparsity (i.e., more people
# located in the same city location, despite slightly different
# underlying lat/lon values). Here, use the Geocoder to map the
# lat/lon to a name and then back to a canonical lat/lon for that
# name
canonical_lat_lon = self.geocoder.canonicalize(home_loc[0], home_loc[1])
location_counts[canonical_lat_lon] += 1
user_to_home_loc[user_id] = canonical_lat_lon
num_users_processed += 1
if num_users_processed % 500000 == 0:
LOGGER.debug('Processed %s of the %s users, associated %s with a known location (%s)'
% (num_users_processed, len(all_users), len(user_to_home_loc),
len(user_to_home_loc) / float(num_users_processed)))
# Iterate through the locations pruning out those that do not occur more
# than some threshold number of times
num_locs_removed = 0
for lat_lon, count in location_counts.iteritems():
if count >= 20:
self.unique_locations.add(lat_lon)
else:
num_locs_removed += 1
LOGGER.debug('Saw %d locations, %d with at least 20 users, %d to be pruned'
% (len(location_counts), len(self.unique_locations), num_locs_removed))
# Remove the home locations of users whose locations aren't in the
# pruned list of minimum-frequency locations
num_user_home_locs_removed = 0
for user_id, loc in user_to_home_loc.items():
if not loc in self.unique_locations:
del user_to_home_loc[user_id]
num_user_home_locs_removed += 1
LOGGER.debug('After pruning removed home locations of %d users, %d still have homes'
% (num_user_home_locs_removed, len(user_to_home_loc)))
# Create a bi-directional mapping from locations to unique
# numeric identifiers. This mapping will be used when
# representing locations in the classifier feature space and
# when converting classifier output to specific locations
location_to_id = {}
for loc in self.unique_locations:
id_ = len(location_to_id)
location_to_id[loc] = id_
self.id_to_location[id_] = loc
# Associate each location with its set of features
n = len(self.unique_locations)
# Each location has 7 features associated with it for classifying a
# user's location. The seven features per location are arranged next to
# each other in the feature space.
feature_offset = 0
for loc in self.unique_locations:
# Feat1: its population bin (size approx.)
self.pop_bin_feature_indices[loc] = feature_offset
# Feat2: the number of reciprocal friends
self.reciprocal_feature_indices[loc] = feature_offset + 1
# Feat3-7: the bins indicating how many friends were in reciprocal
# triads in that city
for bin_num in range(0, 5):
feat = "%s,%s:%s" % (loc[0], loc[1], bin_num)
self.triad_feature_indices[feat] = feature_offset + bin_num + 2
# Increment the feature offset so the next city's features don't
# collide with this city's indices
feature_offset += 7
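# Resulting layout for the i-th location (0-indexed) in the vector:
#   7*i     -> population-size bin
#   7*i + 1 -> fraction of reciprocal friends there
#   7*i + 2 ... 7*i + 6 -> triad-count bins 0 through 4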
# Set the total number of features seen
self.total_num_features = feature_offset
LOGGER.debug('Saw %d unique locations, %d total features'
% (len(self.unique_locations), feature_offset))
LOGGER.debug('Associated %s of the %s users with a known location (%s unique)'
% (len(user_to_home_loc), len(all_users), len(self.unique_locations)))
# The list of locations for each corresponding user in X
B = []
# Train the classifier based on users with known home locations
LOGGER.debug("Generating feature vectors for training")
X = scipy.sparse.lil_matrix((len(user_to_home_loc),
self.total_num_features), dtype=numpy.float64)
print X
row = 0
total_nz = 0
for user_id, location in user_to_home_loc.iteritems():
# Skip users whose locations were omitted due to frequency filtering
# or who have home locations but are not in the mention network
#if not location in self.unique_locations or not user_id in all_users:
# continue
# Fill the row in the matrix corresponding to this user's features
nz = self.fill_user_vector(user_id, mention_network,
user_to_home_loc, X, row)
total_nz += nz
# Get the index of this user's location
location_id = location_to_id[location]
B.append(location_id)
row += 1
X = X.tocsr()
#X = X.toarray()
LOGGER.debug("Generated training data for %d users, %d nz features, %f on average"
% (row, total_nz, float(total_nz) / row))
# Convert the location list into a numpy array for use with scikit
Y = numpy.asarray(B)
if len(X.nonzero()[0]) == 0:
LOGGER.warning("Too little training data seen and no user had non-zero feature "+
"values. Cowardly aborting classification")
else:
# Use SVM classifier with a linear kernel.
#
# NOTE NOTE NOTE NOTE
#
# The original paper uses an RBF kernel with their SVM. However,
# this proved impossibly slow during testing, so a linear kernel was
# used instead.
#
# NOTE NOTE NOTE NOTE
#
# slow: self.location_classifier = svm.SVC(kernel='rbf')
#self.location_classifier = svm.LinearSVC(dual=False)
#self.location_classifier = svm.NuSVC(kernel='rbf', verbose=True, max_iter=1000)
#self.location_classifier = naive_bayes.BernoulliNB()
self.location_classifier = svm.LinearSVC(dual=False, loss='l2', penalty="l2",
tol=1e-2)
# Note: we expect the vector representations to be sparse, so avoid mean
# scaling since it would create dense vectors, which would blow up the
# memory consumption of the model
self.location_vector_scaler = preprocessing.StandardScaler(with_mean=False)
# Learn the scaling parameters and then rescale the input
LOGGER.debug("Scaling feature vectors for training")
X_scaled = self.location_vector_scaler.fit_transform(X.astype(numpy.float64))
LOGGER.debug("Training classifier")
self.location_classifier.fit(X_scaled, Y)
LOGGER.debug("Finished training classifier")
# Assign all the users some location, if we can figure it out
users_assigned = 0
users_seen = 0
for user_id in all_users:
users_seen += 1
# If we know where to place this user, assign it to their home location
if user_id in user_to_home_loc:
self.user_id_to_location[user_id] = user_to_home_loc[user_id]
# Otherwise try to infer the location
else:
location = self.infer_location(user_id, mention_network,
user_to_home_loc)
if not location is None:
self.user_id_to_location[user_id] = location
users_assigned += 1
if users_seen % 100000 == 0:
LOGGER.debug((("Saw %d/%d users, knew location of %d, " +
"inferred the location of %d (total: %d)")
% (users_seen, len(all_users),
len(self.user_id_to_location) - users_assigned,
users_assigned,
len(self.user_id_to_location))))
LOGGER.debug((("Ultimately saw %d/%d users, knew location of %d, " +
"inferred the location of %d (total: %d)")
% (users_seen, len(all_users),
len(self.user_id_to_location) - users_assigned,
users_assigned,
len(self.user_id_to_location))))
# Short circuit early if the caller has specified that the model is not
# to be saved into a directory
if model_dir is None:
return Wheres_Wally_Model(self.user_id_to_location)
if not os.path.exists(model_dir):
os.mkdir(model_dir)
# Write the .tsv for human debugability too
fh = gzip.open(os.path.join(model_dir, 'user-to-lat-lon.tsv.gz'), 'w')
for user_id, loc in self.user_id_to_location.iteritems():
fh.write("%s\t%s\t%s\n" % (user_id, loc[0], loc[1]));
fh.close()
return Wheres_Wally_Model(self.user_id_to_location)
def infer_location(self, user_id, mention_network, user_to_home_loc):
"""
Infers and returns the location of the provided users based on their
features in the network
"""
# Ensure that the model has been trained; otherwise, report an
# empty classification
if self.location_vector_scaler is None or self.location_classifier is None:
return None
# Convert the user's network-based features into a numeric vector
X = scipy.sparse.lil_matrix((1, self.total_num_features), dtype=numpy.float64)
self.fill_user_vector(user_id, mention_network, user_to_home_loc, X, 0)
X = X.tocsr()
# Rescale the vector according to the training data's scaling
user_vector_scaled = self.location_vector_scaler.transform(X)
# Classify the results
location_id = self.location_classifier.predict(user_vector_scaled)[0]
# Convert the index into a location
return self.id_to_location[location_id]
def fill_user_vector(self, user_id, mention_network, user_to_home_loc,
csr_matrix, row_to_fill):
"""
Creates a vector for the user and fills their data into the
specified row in the provided matrix
"""
feat_dict = self.create_user_vector(user_id, mention_network,
user_to_home_loc)
nz = 0
for col, val in feat_dict.iteritems():
csr_matrix[row_to_fill, col] = val
nz += 1
return nz
def create_user_vector(self, user_id, mention_network, user_to_home_loc):
"""
Creates a vector to use with SciPy that represents this user's features
"""
# The binned location features look at all the locations of this user's
# neighbors and then provide a weight for each location according to how
# many of the user's friends are in that location multiplied by how
# large the city is, which is represented as one of five bins
location_to_friends = defaultdict(list)
location_to_followers = defaultdict(list)
num_friends = mention_network.degree(user_id)
# Record which friend appear in each city
for neighbor_id in mention_network.neighbors_iter(user_id):
if neighbor_id in user_to_home_loc:
location_name = user_to_home_loc[neighbor_id]
location_to_friends[location_name].append(neighbor_id)
location_to_followers[location_name].append(neighbor_id)
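# NOTE: because the bi-directional mention network is used here (see
# the note at the top of train_model), the friends and followers
# dicts end up identical; they are kept separate only to mirror the
# directional features described in the original paper.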
# Since the vector is expected to be very sparse, create it as a dict
# for the indices with non-zero feature values.
classifier_input_vector = {}
num_non_zero_features = 0
# Each city/location generates 7 unique features in the best performing
# system
for city, followers_in_city in location_to_followers.iteritems():
n = len(followers_in_city)
# Feature 1: the city's bin multiplied by the number of users in the
# city
city_bin = self.get_city_bin(n)
pop_bin_feature_index = self.pop_bin_feature_indices[city]
classifier_input_vector[pop_bin_feature_index] = city_bin
for city, friends_in_city in location_to_friends.iteritems():
n = len(friends_in_city)
# Feature 2: the percentage of friends with reciprocal edges at that
# location
num_reciprocal_friends = 0
for n1 in friends_in_city:
if mention_network.has_edge(n1, user_id):
num_reciprocal_friends += 1
num_non_zero_features += 1
reciprocal_feature_index = self.reciprocal_feature_indices[city]
classifier_input_vector[reciprocal_feature_index] = float(num_reciprocal_friends) / n  # float() avoids Python 2 integer truncation
if num_reciprocal_friends > 0:
num_non_zero_features += 1
# Features 3-7: the number of triads in the city
triad_counter = collections.Counter()
for n1 in friends_in_city:
num_triads = 0
for n2 in friends_in_city:
if mention_network.has_edge(n1, n2):
num_triads += 1
# Decide which bin this user is in
triad_counter[self.get_triad_bin(num_triads)] += 1
for bin_num, count in triad_counter.iteritems():
feat = "%s,%s:%s" % (city[0], city[1], bin_num)
triad_bin_feature_index = self.triad_feature_indices[feat]
classifier_input_vector[triad_bin_feature_index] = float(count) / num_friends  # float() avoids Python 2 integer truncation
if count > 0:
num_non_zero_features += 1
return classifier_input_vector
def get_triad_bin(self, num_triads):
"""
Returns which bin this count of the number of triads should be in
"""
# Bins in the paper [0,5,10,20,40]
if num_triads < 5:
return 0
elif num_triads < 10:
return 1
elif num_triads < 20:
return 2
elif num_triads < 40:
return 3
else:
return 4
def get_city_bin(self, city_size):
"""
Returns which bin this count of the number of triads should be in
"""
# Bins in the paper [1,2,4,12,57054]
if city_size <= 1:
return 0
elif city_size <= 2:
return 1
elif city_size <= 4:
return 2
elif city_size <= 12:
return 3
# This should be 57054, but we use any value larger than 12 to
# avoid the edge case where a city has more than 57k users
else:
return 4
def load_model(self, model_dir, settings):
"""
Reads in the Where's Wally model from a gzipped .tsv
"""
user_id_to_location = {}
model_file = gzip.open(os.path.join(model_dir, "user-to-lat-lon.tsv.gz"), 'r')
for line in model_file:
cols = line.split("\t")
user_id = cols[0]
lat = float(cols[1])
lon = float(cols[2])
user_id_to_location[user_id] = (lat, lon)
model_file.close()
return Wheres_Wally_Model(user_id_to_location)
| bsd-3-clause |
slinderman/pyhsmm_spiketrains | experiments/make_figure3.py | 1 | 9359 | """
Measure the number of inferred states as a function of:
- number of observed neurons
- time bin size
- length of recording
- firing rate
"""
import os
import cPickle
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
from pyhsmm.util.text import progprint_xrange
from pyhsmm_spiketrains.models import PoissonHDPHMM
import matplotlib
matplotlib.rcParams.update({'font.sans-serif' : 'Helvetica',
'axes.labelsize': 9,
'xtick.labelsize' : 9,
'ytick.labelsize' : 9,
'axes.titlesize' : 11})
import brewer2mpl
allcolors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
from hips.plotting.layout import *
from experiment_helper import load_synth_data
# Set the random seed for reproducibility
np.random.seed(0)
T = 2000
N = 50
K = 100
alpha = 12.0
gamma = 12.0
alpha_obs = 1.0
beta_obs = 1.0
N_iters = 1000
def K_used(model):
return (model.state_usages > 0).sum()
def fit_model(data, N_iters, args={}):
# Now fit the model with a model using all the data
default_args = dict(N=N,
K_max=K,
alpha=alpha,
gamma=gamma,
alpha_obs=alpha_obs,
beta_obs=beta_obs,
init_state_concentration=1.0)
default_args.update(args)
model = PoissonHDPHMM(**default_args)
model.add_data(data)
def _evaluate(model):
ll = model.log_likelihood()
return ll, K_used(model)
def _step(model):
model.resample_model()
return _evaluate(model)
results = [_step(model) for _ in progprint_xrange(N_iters)]
lls = np.array([r[0] for r in results])
Ks = np.array([r[1] for r in results])
return lls, Ks
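# lls and Ks hold, per Gibbs iteration, the model log likelihood and
# the number of occupied states; the experiments below keep only the
# final sample, Ks[-1], for each repeat.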
def test_all(data):
return fit_model(data, N_iters)
def test_N(data, N_test):
"""
:param N_test: Number of neurons to subsample for fitting
:return:
"""
# Downsample the data
test_neurons = np.random.permutation(N)[:N_test]
test_data = data[:,test_neurons].copy('C')
assert test_data.shape[1] == N_test
return fit_model(test_data, N_iters, args={"N": N_test})
def test_T(data, T_test):
"""
:param T_test: Number of time bins to use for fitting
:return:
"""
# Downsample the data
test_data = data[:T_test,:].copy('C')
return fit_model(test_data, N_iters)
def test_dt(data, freq):
"""
:param freq: Number of time bins to aggregate
:return:
"""
# Aggregate time bins
test_data = data.reshape((T//freq, freq, N)).sum(1).copy('C')
assert np.all(test_data[0,:] == data[:freq,:].sum(0))
return fit_model(test_data, N_iters)
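# A minimal sketch (with made-up values) of the reshape-based aggregation used
# in test_dt: consecutive `freq` time bins are summed, shrinking T by a factor
# of freq while keeping all N neurons. Assumes T is divisible by freq.
#   demo = np.arange(12).reshape(6, 2)       # T=6 bins, N=2 neurons
#   coarse = demo.reshape(3, 2, 2).sum(1)    # freq=2 -> T//freq=3 bins
#   coarse[0]                                # array([2, 4]), rows 0+1 summed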
def test_fr(true_model, scale):
# Get the true rate, scale it, and resample the data
true_rate = true_model.states_list[0].rate
test_rate = scale * true_rate
assert np.all(test_rate >= 0)
test_data = np.random.poisson(test_rate)
return fit_model(test_data, N_iters)
def fit_with_subsets_of_data(true_model, data, results_dir,
N_repeats=10):
# Generate synth data
K_true = K_used(true_model)
# Experiments with subsets of neurons
results_file = os.path.join(results_dir, "Ks_vs_N.pkl")
if os.path.exists(results_file):
with open(results_file, "r") as f:
Ns_test, Ks_Ns, _ = cPickle.load(f)
else:
Ns_test = (np.array([0.1, 0.2, 0.5, 0.8, 1.0]) * N).astype(np.int)
Ks_Ns = []
for N_test in Ns_test:
Ks_N = []
for rpt in xrange(N_repeats):
print "N_test: ", N_test, ". Repeat: ", rpt
_, Ks = test_N(data, N_test)
Ks_N.append(Ks[-1])
Ks_Ns.append(Ks_N)
with open(results_file, "w") as f:
cPickle.dump((Ns_test, Ks_Ns, K_true), f, protocol=-1)
# Experiments with subsets of time bins
results_file = os.path.join(results_dir, "Ks_vs_T.pkl")
if os.path.exists(results_file):
with open(results_file, "r") as f:
Ts_test, Ks_Ts, _ = cPickle.load(f)
else:
Ts_test = (np.array([0.1, 0.2, 0.5, 0.8, 1.0]) * T).astype(np.int)
Ks_Ts = []
for T_test in Ts_test:
Ks_T = []
for rpt in xrange(N_repeats):
print "T_test: ", T_test, ". Repeat: ", rpt
_, Ks = test_T(data, T_test)
Ks_T.append(Ks[-1])
Ks_Ts.append(Ks_T)
with open(results_file, "w") as f:
cPickle.dump((Ts_test, Ks_Ts, K_true), f, protocol=-1)
# Experiments with varying firing rates
results_file = os.path.join(results_dir, "Ks_vs_fr.pkl")
if os.path.exists(results_file):
with open(results_file, "r") as f:
frs_test, Ks_frs, _ = cPickle.load(f)
else:
frs_test = np.array([0.1, 0.5, 1.0, 2.0, 10.0])
Ks_frs = []
for fr_test in frs_test:
Ks_fr = []
for rpt in xrange(N_repeats):
print "fr_test: ", fr_test, ". Repeat: ", rpt
_, Ks = test_fr(true_model, fr_test)
Ks_fr.append(Ks[-1])
Ks_frs.append(Ks_fr)
with open(results_file, "w") as f:
cPickle.dump((frs_test, Ks_frs, K_true), f, protocol=-1)
# Experiments with varying time bin size
results_file = os.path.join(results_dir, "Ks_vs_dt.pkl")
if os.path.exists(results_file):
with open(results_file, "r") as f:
dts_test, Ks_dts, _ = cPickle.load(f)
else:
dts_test = np.array([1,2,4,5,10])
Ks_dts = []
for dt_test in dts_test:
Ks_dt = []
for rpt in xrange(N_repeats):
print "dt_test: ", dt_test, ". Repeat: ", rpt
_, Ks = test_dt(data, dt_test)
Ks_dt.append(Ks[-1])
Ks_dts.append(Ks_dt)
with open(results_file, "w") as f:
cPickle.dump((dts_test, Ks_dts, K_true), f, protocol=-1)
return K_true, \
Ns_test, Ks_Ns, \
Ts_test, Ks_Ts, \
frs_test, Ks_frs, \
dts_test, Ks_dts
def plot_results(K_true,
Ns_test, Ks_Ns,
Ts_test, Ks_Ts,
frs_test, Ks_frs,
dts_test, Ks_dts,
figdir="."):
# Plot the number of inferred states as a function of params
fig = create_figure((5,3))
# K vs num neurons
ax = create_axis_at_location(fig, 0.6, 2., 1.7, .8, transparent=True)
ax.boxplot(Ks_Ns, positions=np.arange(1,1+len(Ns_test)),
boxprops=dict(color=allcolors[1]),
whiskerprops=dict(color=allcolors[0]),
flierprops=dict(color=allcolors[1]))
ax.set_xticklabels(Ns_test)
ax.plot([0,6], [K_true, K_true], ':k')
plt.xlim(0.5,5.5)
plt.ylim(0,100)
ax.set_xlabel("$C$")
ax.set_ylabel("Number of States", labelpad=-0.1)
plt.figtext(0.05/5, 2.8/3, "A")
# K vs time
ax = create_axis_at_location(fig, 3.1, 2., 1.7, .8, transparent=True)
ax.boxplot(Ks_Ts, positions=np.arange(1,1+len(Ts_test)),
boxprops=dict(color=allcolors[1]),
whiskerprops=dict(color=allcolors[0]),
flierprops=dict(color=allcolors[1]))
ax.set_xticklabels(Ts_test)
ax.plot([0,6], [K_true, K_true], ':k')
plt.xlim(0.5,5.5)
plt.ylim(0,100)
ax.set_xlabel("$T$")
ax.set_ylabel("Number of States", labelpad=-0.1)
plt.figtext(2.55/5, 2.8/3, "B")
ax = create_axis_at_location(fig, .6, .5, 1.7, .8, transparent=True)
ax.boxplot(Ks_frs, positions=np.arange(1,1+len(frs_test)),
boxprops=dict(color=allcolors[1]),
whiskerprops=dict(color=allcolors[0]),
flierprops=dict(color=allcolors[1]))
ax.set_xticklabels(frs_test)
ax.plot([0,6], [K_true, K_true], ':k')
plt.xlim(0.5,5.5)
plt.ylim(0,100)
ax.set_xlabel("$\lambda$ scale")
ax.set_ylabel("Number of States", labelpad=-0.1)
plt.figtext(0.05/5, 1.3/3, "C")
ax = create_axis_at_location(fig, 3.1, .5, 1.7, .8, transparent=True)
ax.boxplot(Ks_dts, positions=np.arange(1, 1+len(dts_test)),
boxprops=dict(color=allcolors[1]),
whiskerprops=dict(color=allcolors[0]),
flierprops=dict(color=allcolors[1]))
ax.set_xticklabels(dts_test)
ax.plot([0,6], [K_true, K_true], ':k')
plt.xlim(0.5,5.5)
plt.ylim(0,100)
ax.set_xlabel("$\Delta t$ scale")
ax.set_ylabel("Number of States", labelpad=-0.1)
plt.figtext(2.55/5, 1.3/3, "D")
plt.savefig(os.path.join(figdir, "figure2.pdf"))
plt.savefig(os.path.join(figdir, "figure2.png"))
if __name__ == "__main__":
# Load the data
N_repeats = 10
modelname = "hdp-hmm"
T = 2000
T_test = 200
K = 100
N = 50
version = 1
runnum = 1
dataset = "synth_%s_T%d_K%d_N%d_v%d" % (modelname, T, K, N, version)
results_dir = os.path.join("results", dataset, "run%03d" % runnum)
true_model, data, _, _, _ = \
load_synth_data(T, K, N, T_test=T_test,
model=modelname, version=version)
res = fit_with_subsets_of_data(true_model, data, results_dir, N_repeats)
plot_results(*res, figdir=results_dir)
| mit |
NoMoKeTo/c3nav | src/classes/room.py | 1 | 1491 | import numpy as np
from flask.ext.babel import gettext as _
from matplotlib.path import Path
from .location import Location
class Room(Location):
ltype = 'room'
priority = 2
def __init__(self, graph, name, level, titles, shape):
super().__init__(name, titles)
self.graph = graph
self.level = level
self.shape = shape
self.nodes = []
self.pois = []
self.barriers = []
self.groups = []
mpl_xy = self.shape+self.shape[-1:]
mpl_codes = [Path.MOVETO] + [Path.LINETO]*len(self.shape)
self.mpl_path = Path(np.array(mpl_xy), codes=mpl_codes)
@property
def priority(self):
return 1 if self.groups else 2
def contains_position(self, position):
if position.level != self.level:
return False
return self.mpl_path.contains_point((position.x, position.y))
def get_barriers(self):
return (b for b in self.graph.barriers
if b.level == self.level and self.mpl_path.intersects_path(b.mpl_path, True))
def barrier_paths(self):
return [self.mpl_path] + [b.mpl_path for b in self.barriers]
@property
def subtitle(self):
if not self.groups:
return _('Level %(level)d', level=self.level)
else:
return _('%(roomgroup)s, Level %(level)d', roomgroup=self.groups[0].collection_title, level=self.level)
def __repr__(self):
return 'Room(%s)' % repr(self.name)
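# A minimal, self-contained sketch (not part of c3nav) of the matplotlib Path
# containment test that contains_position relies on; the square below is a
# made-up example shape:
#   square = Path(np.array([(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]),
#                 codes=[Path.MOVETO] + [Path.LINETO] * 4)
#   square.contains_point((1, 1))   # True  -> point lies inside the outline
#   square.contains_point((3, 3))   # False -> point lies outside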
| apache-2.0 |
benoitsteiner/tensorflow | tensorflow/examples/learn/mnist.py | 45 | 3999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(feature, target, mode):
"""2-layer convolution model."""
# Convert the target to a one-hot tensor of shape (batch_size, 10) and
# with an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
# Reshape feature to 4d tensor with 2nd and 3rd dimensions being
# image width and height, and the final dimension being the number of color channels.
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution2d(
feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution2d(
h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
h_pool2_flat, 1024, activation_fn=tf.nn.relu),
keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
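# Shape walk-through for 28x28 MNIST inputs: the input reshapes to
# [-1, 28, 28, 1]; conv_layer1 (SAME padding) gives [-1, 28, 28, 32], pooled to
# [-1, 14, 14, 32]; conv_layer2 gives [-1, 14, 14, 64], pooled to [-1, 7, 7, 64];
# flattening yields 7*7*64 = 3136 features feeding the 1024-unit dense layer
# and the 10-way logits.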
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images,
mnist.train.labels.astype(np.int32),
batch_size=100,
steps=1000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=20000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
MicrosoftResearch/Azimuth | azimuth/models/ensembles.py | 1 | 10070 | import numpy as np
import sklearn.linear_model
import sklearn.ensemble as en
from sklearn.grid_search import GridSearchCV
import sklearn
from sklearn.linear_model import LinearRegression
import scipy as sp
from regression import linreg_on_fold
import sklearn
import sklearn.tree as tree
from sklearn import svm
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import cross_val_score
def spearman_scoring(clf, X, y):
y_pred = clf.predict(X).flatten()
return sp.stats.spearmanr(y_pred, y.flatten())[0]
def adaboost_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options, classification=False):
'''
Gradient-boosted trees (GradientBoostingRegressor/Classifier) from scikit-learn, used here as the 'adaboost' model.
'''
if learn_options['adaboost_version'] == 'python':
if not learn_options['adaboost_CV']:
if not classification:
clf = en.GradientBoostingRegressor(loss=learn_options['adaboost_loss'], learning_rate=learn_options['adaboost_learning_rate'],
n_estimators=learn_options['adaboost_n_estimators'],
alpha=learn_options['adaboost_alpha'],
subsample=1.0, min_samples_split=2, min_samples_leaf=1, max_depth=learn_options['adaboost_max_depth'],
init=None, max_features=None,
verbose=0, max_leaf_nodes=None, warm_start=False, random_state=learn_options['seed'])
else:
clf = en.GradientBoostingClassifier(learning_rate=learn_options['adaboost_learning_rate'],
n_estimators=learn_options['adaboost_n_estimators'],
subsample=1.0, min_samples_split=2, min_samples_leaf=1, max_depth=learn_options['adaboost_max_depth'],
init=None, max_features=None,
verbose=0, max_leaf_nodes=None, warm_start=False, random_state=learn_options['seed'])
clf.fit(X[train], y[train].flatten())
y_pred = clf.predict(X[test])[:, None]
else: # optimize the parameters of the adaboosted algorithm
if learn_options["algorithm_hyperparam_search"]=="bo":
print
from hyperopt import hp, fmin, tpe, rand
def adaboost_scoring_bo(params):
# label_encoder = sklearn.preprocessing.LabelEncoder()
# label_encoder.fit(y_all['Target gene'].values[train])
# gene_classes = label_encoder.transform(y_all['Target gene'].values[train])
# n_folds = len(np.unique(gene_classes))
cv = sklearn.cross_validation.KFold(y_all['Target gene'].values[train].shape[0], n_folds=20, shuffle=True)
est = en.GradientBoostingRegressor(n_estimators=1000, learning_rate=params['learning_rate'], max_depth=params['max_depth'],
min_samples_leaf=params['min_samples_leaf'], max_features=params['max_features'], random_state=learn_options['seed'])
scorer = cross_val_score(est, X[train], y[train].flatten(), cv=cv, n_jobs=20)
return np.median(scorer)
space = {
'learning_rate': hp.uniform('learning_rate', 0.001, 0.1),
'max_depth': hp.quniform('max_depth', 1, 8, 1),
'min_samples_leaf': hp.quniform('min_samples_leaf', 3, 20, 1),
'max_features': hp.uniform('max_features', 0.05, 1.0)}
best = fmin(adaboost_scoring_bo, space, algo=tpe.suggest, max_evals=50, verbose=1)
print best
clf = en.GradientBoostingRegressor(n_estimators=learn_options['adaboost_n_estimators'],
learning_rate=best['learning_rate'],
max_depth=best['max_depth'],
min_samples_leaf=best['min_samples_leaf'],
max_features=best['max_features'], random_state=learn_options['seed'])
clf.fit(X[train], y[train].flatten())
elif learn_options["algorithm_hyperparam_search"]=="grid":
assert not classification, "need to tweak code below to do classification, as above"
n_jobs = 20
print "Adaboost with GridSearch"
from sklearn.grid_search import GridSearchCV
param_grid = {'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [4, 5, 6, 7],
'min_samples_leaf': [5, 7, 10, 12, 15],
'n_estimators': [100, 500, 1000, 2000]}
# 'max_features': [1.0, 0.5, 0.3, 0.1]}
# param_grid = {'n_estimators': [100, ]
# 'learning_rate': [0.1, 0.05, 0.001],
# 'max_depth': [4, 7],
# 'min_samples_leaf': [5, 15],
# 'max_features': [1.0, 0.1]}
# label_encoder = sklearn.preprocessing.LabelEncoder()
# label_encoder.fit(y_all['Target gene'].values[train])
# gene_classes = label_encoder.transform(y_all['Target gene'].values[train])
n_folds = 10 # len(np.unique(gene_classes))
# cv = sklearn.cross_validation.StratifiedKFold(gene_classes, n_folds=n_folds, shuffle=True)
cv = sklearn.cross_validation.KFold(X[train].shape[0], n_folds=n_folds, shuffle=True)
est = en.GradientBoostingRegressor(loss=learn_options['adaboost_loss'], random_state=learn_options['seed'])#, n_estimators=learn_options['adaboost_n_estimators'])
clf = GridSearchCV(est, param_grid, n_jobs=n_jobs, verbose=1, cv=cv, scoring=spearman_scoring, iid=False)
clf.fit(X[train], y[train].flatten())
print clf.best_params_
else:
raise Exception("if using adaboost_CV then need to specify grid (grid search) or bo (bayesian optimization)")
y_pred = clf.predict(X[test])[:, None]
else:
raise NotImplementedError
return y_pred, clf
def LASSOs_ensemble_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
train_indices = np.where(train)[0]
sel = int(len(train_indices) * 0.10)  # number of held-out validation points, must be an integer for slicing
permuted_ind = np.random.permutation(train_indices)
valid_indices = permuted_ind[:sel]
train_indices = permuted_ind[sel:]
train_sub = np.zeros_like(train, dtype=bool)
valid_sub = np.zeros_like(train, dtype=bool)
train_sub[train_indices] = True
valid_sub[valid_indices] = True
validations = np.zeros((len(valid_indices), len(feature_sets.keys())))
predictions = np.zeros((test.sum(), len(feature_sets.keys())))
for i, feature_name in enumerate(feature_sets.keys()):
X_feature = feature_sets[feature_name].values
y_pred, m = linreg_on_fold(feature_sets, train_sub, valid_sub, y, y_all, X_feature, dim, dimsum, learn_options)
predictions[:, i] = m.predict(X_feature[test]).flatten()
validations[:, i] = y_pred.flatten()
clf = LinearRegression()
clf.fit(validations, y[valid_sub])
y_pred = clf.predict(predictions)
return y_pred, None
def randomforest_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
'''
RandomForestRegressor from scikitlearn.
'''
clf = en.RandomForestRegressor(oob_score=True, n_jobs=20, n_estimators=2000)
clf.fit(X[train], y[train][:, 0])
y_pred = clf.predict(X[test])[:, None]
return y_pred, clf
def decisiontree_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
'''
DecisionTreeRegressor from scikitlearn.
'''
clf = tree.DecisionTreeRegressor()
clf.fit(X[train], y[train][:, 0])
y_pred = clf.predict(X[test])[:, None]
return y_pred, clf
def linear_stacking(y_train, X_train, X_test):
clf = sklearn.linear_model.LinearRegression()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
return y_pred.flatten()
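# The next helper aggregates several models' predictions: y is expected to be
# (n_samples, n_models); for every pair of samples (i, j) it records whether a
# majority of models rank i above j, then scores each sample by its number of
# pairwise wins, normalised by the maximum win count.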
def pairwise_majority_voting(y):
N = y.shape[0]
y_pred = np.zeros((N, N))
for i in range(N):
for j in range(N):
if i == j:
continue
y_pred[i, j] = (y[i] > y[j]).sum() > y.shape[1]/2
return y_pred.sum(1)/y_pred.sum(1).max()
def median(y):
return np.median(y, axis=1)
def GBR_stacking(y_train, X_train, X_test):
param_grid = {'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [2, 3, 4, 5], # [2, 3, 4, 6],
'min_samples_leaf': [1, 2, 3], # ,5, 7],
'max_features': [1.0, 0.5, 0.3, 0.1]}
est = en.GradientBoostingRegressor(loss='ls', n_estimators=100)
clf = GridSearchCV(est, param_grid, n_jobs=3, verbose=1, cv=20, scoring=spearman_scoring).fit(X_train, y_train.flatten())
# clf.fit(X_train, y_train.flatten())
return clf.predict(X_test)
def GP_stacking(y_train, X_train, X_test):
import GPy
m = GPy.models.SparseGPRegression(X_train, y_train, num_inducing=20, kernel=GPy.kern.RBF(X_train.shape[1]))
m.optimize('bfgs', messages=0)
y_pred = m.predict(X_test)[0]
return y_pred.flatten()
def SVM_stacking(y_train, X_train, X_test):
parameters = {'kernel': ('linear', 'rbf'), 'C': np.linspace(1, 10, 10), 'gamma': np.linspace(1e-3, 1., 10)}
svr = svm.SVR()
clf = GridSearchCV(svr, parameters, n_jobs=3, verbose=1, cv=10, scoring=spearman_scoring)
clf.fit(X_train, y_train.flatten())
return clf.predict(X_test)
| bsd-3-clause |
Hiyorimi/scikit-image | doc/examples/xx_applications/plot_geometric.py | 5 | 3544 | """
===============================
Using geometric transformations
===============================
In this example, we will see how to use geometric transformations in the context
of image processing.
"""
from __future__ import print_function
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import transform as tf
margins = dict(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
######################################################################
# Basics
# ======
#
# Several different geometric transformation types are supported: similarity,
# affine, projective and polynomial.
#
# Geometric transformations can either be created using the explicit
# parameters (e.g. scale, shear, rotation and translation) or the
# transformation matrix:
#
# First we create a transformation using explicit parameters:
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 2,
translation=(0, 1))
print(tform.params)
######################################################################
# Alternatively you can define a transformation by the transformation matrix
# itself:
matrix = tform.params.copy()
matrix[1, 2] = 2
tform2 = tf.SimilarityTransform(matrix)
######################################################################
# These transformation objects can then be used to apply forward and inverse
# coordinate transformations between the source and destination coordinate
# systems:
coord = [1, 0]
print(tform2(coord))
print(tform2.inverse(tform(coord)))
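# Under the hood this is plain homogeneous-coordinate matrix algebra; a minimal
# sketch of what ``tform2(coord)`` computes:
#
#   xy1 = np.array([coord[0], coord[1], 1.0])   # homogeneous coordinates
#   x, y, w = np.dot(tform2.params, xy1)        # 3x3 matrix product
#   # (x / w, y / w) reproduces tform2(coord); w == 1 for similarity transforms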
######################################################################
# Image warping
# =============
#
# Geometric transformations can also be used to warp images:
text = data.text()
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 4,
translation=(text.shape[0] / 2, -100))
rotated = tf.warp(text, tform)
back_rotated = tf.warp(rotated, tform.inverse)
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.axis('off')
ax2.imshow(rotated)
ax2.axis('off')
ax3.imshow(back_rotated)
ax3.axis('off')
######################################################################
# Parameter estimation
# ====================
#
# In addition to the basic functionality mentioned above you can also
# estimate the parameters of a geometric transformation using the least-
# squares method.
#
# This can amongst other things be used for image registration or
# rectification, where you have a set of control points or
# homologous/corresponding points in two images.
#
# Let's assume we want to recognize letters on a photograph which was not
# taken from the front but at a certain angle. In the simplest case of a
# plane paper surface the letters are projectively distorted. Simple matching
# algorithms would not be able to match such symbols. One solution to this
# problem would be to warp the image so that the distortion is removed and
# then apply a matching algorithm:
text = data.text()
src = np.array((
(0, 0),
(0, 50),
(300, 50),
(300, 0)
))
dst = np.array((
(155, 15),
(65, 40),
(260, 130),
(360, 95)
))
tform3 = tf.ProjectiveTransform()
tform3.estimate(src, dst)
warped = tf.warp(text, tform3, output_shape=(50, 300))
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.plot(dst[:, 0], dst[:, 1], '.r')
ax1.axis('off')
ax2.imshow(warped)
ax2.axis('off')
| bsd-3-clause |
liculm89/Python_mech_calc | Scripts/conveyor_velocity.py | 1 | 1578 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 13:16:37 2016
@author: Mauro
"""
# Conveyor belt speed calculation
import math
from matplotlib import pyplot as plt
import numpy as np
n = 3000.
n_ovi = np.linspace(300, 10, 10)
for i in range(len(n_ovi)):
n_ovi[i] = round(n_ovi[i], 1)
omege = np.zeros(len(n_ovi))
for i in range(len(omege)):
omege[i] = ((math.pi * n_ovi[i]) / 30.)
omega = (math.pi * n) / 30
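# Worked example: for n = 3000 rpm, omega = pi*3000/30 = 100*pi ~ 314.16 rad/s;
# at a belt radius of 0.045 m this gives v = omega*r ~ 14.14 m/s.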
#print('omega equals: ', round(omega, 2))
radius = np.linspace(0.03, 0.06, 100)
# radii
brzine = np.zeros([len(n_ovi), len(radius)])
for i in range(len(n_ovi)):
for j in range(len(radius)):
brzine[i][j] = omege[i] * radius[j]
brzina = np.zeros(len(radius))
for i in range(len(radius)):
brzina[i] = omega * radius[i]
radius = radius * 1000
fig = plt.figure()
# plt.figure(figsize=(19,12))
ax = fig.add_subplot(111)
ax.text(32, 2 - 0.5, r' $ \omega = \frac {\pi n} {30}$', fontsize=25)
ax.text(32, 2 - 0.9, r' $v = \omega r$', fontsize=25)
import matplotlib as mpl
ax.get_xaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.get_yaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
plt.grid(True, which='minor', color='black')
plt.grid(True, which='major', color='black')
#plt.plot(radius, brzina)
for i in range(len(brzine)):
plt.plot(radius, brzine[i])
#n_ovi[:] = n_ovi[::-1]
plt.legend(n_ovi)
plt.title('Belt radius vs. linear belt speed for n [rpm]')
plt.xlim(30, 60)
plt.xlabel('Belt radius [mm]')
plt.ylabel('Belt speed [m/s]')
plt.show()
| gpl-3.0 |
ngoix/OCRF | sklearn/utils/random.py | 37 | 10511 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
if pop_size == 0:
raise ValueError("a must be non-empty")
if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilites for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
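# A minimal usage sketch (made-up class values and probabilities) for
# random_choice_csc:
#   classes = [np.array([0, 1, 2])]
#   probs = [np.array([0.5, 0.3, 0.2])]
#   m = random_choice_csc(10, classes, probs, random_state=0)
#   m.shape   # (10, 1); m is a scipy.sparse CSC matrix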
| bsd-3-clause |
lionelliang/PairTradingSpark | checkpairtrading.py | 1 | 1360 | ## Spark Application - execute with spark-submit
## Imports
import csv
#import matplotlib.pyplot as plt
from StringIO import StringIO
from datetime import datetime
from collections import namedtuple
from operator import add, itemgetter
from pyspark import SparkConf, SparkContext
## Module Constants
APP_NAME = "ADF Spark Application"
fields = ('date', 'sym', 'open', 'high', 'low', 'clsoe', 'volume', 'amount')
Quotation = namedtuple('Quotation', fields)
DATE_FMT = "%Y/%m/%d"
TIME_FMT = "%H%M"
## Closure Functions
def parse(row):
"""
Parses a row and returns a named tuple.
"""
row[0] = datetime.strptime(row[0], DATE_FMT).date()
row[2] = float(row[2])
row[3] = float(row[3])
row[4] = float(row[4])
row[5] = float(row[5])
return Quotation(*row[:8])
def split(line):
"""
Operator function for splitting a line with csv module
"""
reader = csv.reader(StringIO(line))
return reader.next()
## Main functionality
def main(sc):
# Read the CSV Data into an RDD
kindleline1 = sc.textFile("600815.csv").map(split).map(parse)
kindleline2 = sc.textFile("601002.csv").map(split).map(parse)
print "%d, %d" %(kindleline1.count(), kindleline2.count())
if __name__ == "__main__":
# Configure Spark
conf = SparkConf().setAppName(APP_NAME)
conf = conf.setMaster("local[*]")
sc = SparkContext(conf=conf)
# Execute Main functionality
main(sc)
| gpl-2.0 |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/numpy/linalg/linalg.py | 32 | 75738 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
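# The extobj list built above is passed to the _umath_linalg gufuncs; its third
# slot holds a Python callback (filled in by get_linalg_error_extobj below) that
# the ufunc error machinery invokes on an "invalid" floating-point signal, which
# is how e.g. a singular matrix inside the C loops surfaces as a LinAlgError.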
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
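# --- Editor's note: illustrative sketch, not part of the original NumPy source. ---
# The Notes above discuss how to choose `tol`; this helper shows how the alternative
# "expected roundoff error" threshold quoted from Numerical Recipes could be passed
# explicitly instead of relying on the default S.max() * max(M.shape) * eps.
# The input is assumed to be a 2-D array (hypothetical example data).
def _example_matrix_rank_custom_tol(M):
    import numpy as np
    M = np.asarray(M)
    m, n = M.shape
    S = np.linalg.svd(M, compute_uv=False)
    tol = S.max() * np.finfo(S.dtype).eps / 2. * np.sqrt(m + n + 1.)
    return np.linalg.matrix_rank(M, tol=tol)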
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.;
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
# cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return an np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
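# --- Editor's note: illustrative sketch, not part of the original NumPy source. ---
# A small usage check of multi_dot for the Cormen-style chain discussed in the
# docstring above; for these shapes the optimal parenthesization is (A B) C.
def _example_multi_dot():
    import numpy as np
    A = np.random.random((10, 100))
    B = np.random.random((100, 5))
    C = np.random.random((5, 50))
    out = multi_dot([A, B, C])                  # (A B) C is chosen here
    assert np.allclose(out, A.dot(B).dot(C))    # same result as explicit chaining
    return out.shape                            # (10, 50)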
| artistic-2.0 |
philouc/pyhrf | python/pyhrf/test/test_plot.py | 1 | 6273 |
import unittest
import pyhrf
import os
import numpy as np
import os.path as op
import shutil
from pyhrf.tools import add_suffix
from pyhrf.plot import plot_func_slice, plot_cub_as_curve, plot_cub_as_image
import matplotlib.pyplot as plt
from pyhrf.ndarray import xndarray
class PlotCommandTest(unittest.TestCase):
def setUp(self):
tag = 'subj0_%s.nii.gz'
self.func_file = pyhrf.get_data_file_name(tag%'bold_session0')
self.anatomy_file = pyhrf.get_data_file_name(tag%'anatomy')
self.roi_mask_file = pyhrf.get_data_file_name(tag%'parcellation')
self.ax_slice = 24
self.sag_slice = 7
self.cor_slice = 34
self.tmp_dir = pyhrf.get_tmp_path() #'./'
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_plot_func_slice_func_only(self):
cmd = 'pyhrf_plot_slice %s -a %d -d %s' \
%(self.func_file, self.ax_slice, self.tmp_dir)
if os.system(cmd) != 0 :
raise Exception('"' + cmd + '" did not execute correctly')
def test_plot_func_slice_func_only_multiple_slices(self):
cmd = 'pyhrf_plot_slice %s -a %d -d %s -s %d -c %d' \
%(self.func_file, self.ax_slice, self.tmp_dir, self.sag_slice,
self.cor_slice)
if os.system(cmd) != 0 :
raise Exception('"' + cmd + '" did not execute correctly')
def test_plot_func_slice_func_roi(self):
cmd = 'pyhrf_plot_slice %s -a %d -d %s -m %s' \
%(self.func_file, self.ax_slice, self.tmp_dir, self.roi_mask_file)
if os.system(cmd) != 0 :
raise Exception('"' + cmd + '" did not execute correctly')
def test_plot_func_slice_func_roi_anat(self):
cmd = 'pyhrf_plot_slice %s -a %d -d %s -m %s -y %s' \
%(self.func_file, self.ax_slice, self.tmp_dir, self.roi_mask_file,
self.anatomy_file)
if os.system(cmd) != 0 :
raise Exception('"' + cmd + '" did not execute correctly')
def test_plot_func_slice_func_roi_anat_multiple_slices(self):
cmd = 'pyhrf_plot_slice %s -a %d -d %s -m %s -y %s -s %d -c %d' \
%(self.func_file, self.ax_slice, self.tmp_dir, self.roi_mask_file,
self.anatomy_file, self.sag_slice, self.cor_slice)
if os.system(cmd) != 0 :
raise Exception('"' + cmd + '" did not execute correctly')
# def test_plot_func_slice_func_highlighted_roi_anat(self):
# plot_func_slice(self.func_data, parcellation=self.roi_data,
# anatomy=self.anat_data,
# highlighted_parcels_col={1:'red'})
# plt.show()
class PlotFunctionsTest(unittest.TestCase):
def setUp(self):
tag = 'subj0_%s.nii.gz'
func_file = pyhrf.get_data_file_name(tag%'bold_session0')
anatomy_file = pyhrf.get_data_file_name(tag%'anatomy')
roi_mask_file = pyhrf.get_data_file_name(tag%'parcellation')
islice = 24
cfunc = xndarray.load(func_file).sub_cuboid(time=0,axial=islice)
cfunc.set_orientation(['coronal', 'sagittal'])
self.func_data = cfunc.data
canat = xndarray.load(anatomy_file).sub_cuboid(axial=islice*3)
canat.set_orientation(['coronal', 'sagittal'])
self.anat_data = canat.data
croi_mask = xndarray.load(roi_mask_file).sub_cuboid(axial=islice)
croi_mask.set_orientation(['coronal', 'sagittal'])
self.roi_data = croi_mask.data
# self.tmp_dir = pyhrf.get_tmp_path()
# def tearDown(self):
# shutil.rmtree(self.tmp_dir)
def test_plot_cuboid_as_curve(self):
from pyhrf.ndarray import xndarray
sh = (10,10,5,3)
data = np.zeros(sh)
data[:,:,:,0] = 1.
data[:,:,:,1] = 2.
data[:,:,:,2] = 3.
c1 = xndarray(data, axes_names=['sagittal','coronal','axial','condition'],
axes_domains={'condition':['audio1','audio2', 'video']})
f = plt.figure()
ax = f.add_subplot(111)
ori = ['condition', 'sagittal']
plot_cub_as_curve(c1.sub_cuboid(axial=0, coronal=0).reorient(ori),
colors={'audio1':'red', 'audio2':'orange',
'video': 'blue'}, axes=ax)
if 0:
plt.show()
def test_plot_cuboid2d_as_image(self):
from pyhrf.ndarray import xndarray
import matplotlib
sh = (10,3)
c1 = xndarray(np.arange(np.prod(sh)).reshape(sh),
axes_names=['sagittal','condition'],
axes_domains={'condition':['audio1','audio2', 'video']})
f = plt.figure()
ax = f.add_subplot(111)
ori = ['condition', 'sagittal']
cm = matplotlib.cm.get_cmap('winter')
norm = matplotlib.colors.Normalize(vmin=5, vmax=20)
plot_cub_as_image(c1.reorient(ori), cmap=cm, norm=norm, axes=ax,
show_axes=True, show_axis_labels=True,
show_colorbar=True,
show_tick_labels=False)
if 0:
plt.show()
def test_plot_cuboid1d_as_image(self):
from pyhrf.ndarray import xndarray
import matplotlib
sh = (3,)
c2 = xndarray(np.arange(np.prod(sh)).reshape(sh),
axes_names=['condition'],
axes_domains={'condition':['audio1','audio2', 'video']})
f = plt.figure()
ax = f.add_subplot(111)
cm = matplotlib.cm.get_cmap('winter')
norm = matplotlib.colors.Normalize(vmin=0., vmax=3.)
plot_cub_as_image(c2, cmap=cm, norm=norm, axes=ax,
show_axes=True, show_axis_labels=True,
show_tick_labels=True)
if 0:
plt.show()
def test_plot_cuboid1d_as_curve(self):
from pyhrf.ndarray import xndarray
sh = (3,)
conds = np.array(['audio1','audio2', 'video'])
c2 = xndarray(np.arange(np.prod(sh)).reshape(sh),
axes_names=['condition'],
axes_domains={'condition': conds})
f = plt.figure()
ax = f.add_subplot(111)
plot_cub_as_curve(c2, axes=ax, show_axis_labels=True)
if 0:
plt.show()
| gpl-3.0 |
COMBINE-lab/matryoshka_work | coredomains-import/python-src/AnalyzeFeatures.py | 1 | 10857 | """
Usage: AnalyzeFeatures.py --domains=<domains> --feature=<feature> [--res=<res>] [--numControlBoundaries=<nc>] [--peak] [--background]
Options:
--domains=<domains> The domains file
--feature=<feature> The feature to plot
--res=<res> Resolution of the bins [default: 10000]
--background Visualize the background (within domain) signal
--peak Interpret the input as a peak file (instead of signal)
--numControlBoundaries=<nc> Number of control boundaries to use (e.g. the number of domains)
"""
from docopt import docopt
import Features
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from bx.intervals.intersection import Interval, IntervalTree
import os
class Domain(object):
def __init__(self, l):
toks = l.rstrip().split()
self.chrom = toks[0]
self.start = int(toks[1])
self.stop = int(toks[2])
def center(self):
return (self.start + self.stop) / 2
def length(self):
return self.stop - self.start
def overlaps(feature, start, end):
if (feature.stop >= start and feature.stop < end) or (feature.start >= start and feature.start < end):
return True
else:
return False
def getFeatPosRange(flist, domains, winDown):
domains = sorted(domains, key=lambda x: x.stop)
return flist[0].start, max(flist[-1].stop + 1, domains[-1].center() + winDown + 1)
def processPeaks(chrom, flist, domains, res, winDown):
# Get the first and last feature positions
# CODEREVIEW: We assume feature intervals don't overlap (or overlap a little)
firstFeatPos, lastFeatPos = getFeatPosRange(flist, domains, winDown) #flist[0].start, flist[-1].stop+1
# Vals will hold the feature counts along the chromosome
vals = np.zeros(lastFeatPos)
avgVals = np.zeros(lastFeatPos)
for f in flist:
# A peak gets assigned to all positions this peak overlaps
vals[f.start: f.stop+1] += 1
T = IntervalTree()
for f in flist:
T.insert_interval(Interval(f.start, f.stop))
# For every 'res'-length bin, record the number of peaks overlapping that bin
# (i.e. the peak count per res bp, plotted later as an average)
for idx in xrange(0, vals.shape[0] + res, res):
maxIdx = min(idx + res, vals.shape[0])
overlappingFeatures = T.find(idx, maxIdx)
avgVals[idx: maxIdx] += len(overlappingFeatures)
return avgVals, vals
def processSignal(chrom, flist, domains, res, winDown):
# Get the first and last feature positions
firstFeatPos, lastFeatPos = getFeatPosRange(flist, domains, winDown) #flist[0].start, flist[-1].stop+1
# vals will hold the feature counts along the chromosome
vals = np.zeros(lastFeatPos)
valsInp = np.zeros(lastFeatPos)
avgVals = np.zeros(lastFeatPos)
for f in flist:
# vals holds the 'foreground' signal
vals[f.start: f.stop+1] += f.value
# vals2 holds the input / 'background' signal
valsInp[f.start: f.stop+1] += f.value2
for idx in xrange(0, vals.shape[0] + res, res):
maxIdx = min(idx + res, vals.shape[0])
norm = 1.0 / (maxIdx - idx + 1)
# vals holds the accumulated foreground signal and valsInp the background
# (input) signal along the chromosome; both are summed within each [res]bp
# bin below and the result is scaled by the bin width.
avg = None
if valsInp[idx: maxIdx].sum() > 0.0:
# The enrichment in this bin is the scaled log2 ratio of foreground to background signal
avg = norm * np.log2(vals[idx: maxIdx].sum() / valsInp[idx: maxIdx].sum())
else:
avg = 0.0
avgVals[idx: maxIdx] = avg
return avgVals, vals
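# --- Editor's note: illustrative sketch, not part of the original script. ---
# Toy version of the per-bin enrichment computed in processSignal(): each
# res-length bin gets log2(sum(foreground) / sum(background)) scaled by the bin
# width, or 0 where there is no background. `fg` and `bg` are hypothetical
# float arrays of equal length, not real wig tracks.
def _example_bin_enrichment(fg, bg, res):
    out = np.zeros(len(fg))
    for idx in xrange(0, len(fg), res):
        maxIdx = min(idx + res, len(fg))
        norm = 1.0 / (maxIdx - idx + 1)
        bgSum = bg[idx:maxIdx].sum()
        if bgSum > 0:
            out[idx:maxIdx] = norm * np.log2(fg[idx:maxIdx].sum() / bgSum)
        else:
            out[idx:maxIdx] = 0.0
    return out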
def main(opts):
res = int(opts['--res'])
isPeak = opts['--peak']
# Read in the peaks and sort them
feats = Features.broadPeakFileParser(opts['--feature']) if isPeak \
else Features.parseWig(opts['--feature'])
feats = sorted(feats, key=lambda x: x.chrom)
import itertools
# We'll look at a window from -500kb to +500kb centered
# about the domain boundary and domain center
winUp = -500000
winDown = 500000
# Holds the total counts in the windows around the area
# of interest
windowSumsBoundary = np.zeros(winDown - winUp)
windowSumsCenter = np.zeros(winDown - winUp)
numWindowsBoundary, numWinsCenter = 0, 0
numBoundariesWithFeature, numDomainsWithFeature = 0, 0
# Read the domains from file, and sort them
allDomains = None
with open(opts['--domains'], 'rb') as ifile:
allDomains = [Domain(l) for l in ifile]
# Group the domains by chromosome
allDomains = sorted(allDomains, key=lambda x: x.chrom)
numTotalDomains = len(allDomains)
allDomains = {chrom: list(dlist) for chrom, dlist in itertools.groupby(allDomains, lambda x: x.chrom)}
domainLengths = np.array([d.length() for dlist in allDomains.itervalues() for d in dlist])
import scipy as sp
import scipy.stats
# We'll maintain separate signal averages for the lower, mid and upper tritiles
dlpercentiles = [sp.stats.scoreatpercentile(domainLengths, p) for p in [25, 50, 75, 100]]
windowSumsCenters = [np.zeros(winDown - winUp) for dlp in dlpercentiles]
numWinsCenters = [0]*len(dlpercentiles)
interDomainLengths = [] # CODEREVIEW: May not be used
numControlPeaks = numTotalDomains
if opts['--numControlBoundaries']:
numControlPeaks = int(opts['--numControlBoundaries'])
sampleRatio = float(numControlPeaks) / numTotalDomains
print(sampleRatio)
displayBackground = opts['--background']
# Iterate over the features by chromosome
for chrom, flist in itertools.groupby(feats, lambda x: x.chrom):
# both the features and domains must be defined for this chromosome
if chrom not in allDomains or chrom == "chrX":
continue
flist = list(flist) # groupby gives us a generator, we need a list
flist = sorted(flist, key=lambda x: x.start)
domains = sorted(allDomains[chrom], key=lambda x: x.start)
print("Processing {}".format(chrom))
signal, vals = processPeaks(chrom, flist, domains, res, winDown) if isPeak\
else processSignal(chrom, flist, domains, res, winDown)
# Get the first and last feature positions
firstFeatPos, lastFeatPos = getFeatPosRange(flist, domains, winDown) #flist[0].start, flist[-1].stop+1
# CODEREVIEW: Assuming d1 < d2
def isBoundary(d1, d2):
return (d2.start - d1.stop) <= 400000
def boundaryMidpoint(d1, d2):
return (d2.start + d1.stop) / 2
def isValidLocus(l):
return l + winUp >= 0 and l + winDown < lastFeatPos
domainBuffer = 20000
numDomains = len(domains)
import bisect
import random
for i, j in itertools.izip(xrange(numDomains), xrange(1, numDomains)):
if random.random() > sampleRatio:
continue
d1, d2 = domains[i], domains[j]
interDomainLengths.append(d2.start - d1.stop)
if isBoundary(d1, d2):
mid = boundaryMidpoint(d1, d2)
if isValidLocus(mid):
# CODEREVIEW: the literal 1000000 should be winDown - winUp
sig = np.zeros(1000000)
for idx in xrange(0, sig.shape[0] + res, res):
maxIdx = min(idx + res, sig.shape[0])
norm = 1.0 / (maxIdx - idx + 1)
sig[idx: maxIdx] = signal[(mid + winUp) + idx: (mid + winUp) + maxIdx]
windowSumsBoundary += sig
numWindowsBoundary += 1
numBoundariesWithFeature += 1 if vals[d1.stop: d2.start].sum() > 0 else 0
# The middle of the domain
if isValidLocus(d1.center()):
sig = np.zeros(1000000)
for idx in xrange(0, sig.shape[0] + res, res):
maxIdx = min(idx + res, sig.shape[0])
norm = 1.0 / (maxIdx - idx + 1)
sig[idx: maxIdx] = signal[(d1.center() + winUp) + idx: (d1.center() + winUp) + maxIdx]
percentileIndex = bisect.bisect_left(dlpercentiles, d1.length())
windowSumsCenters[percentileIndex] += sig
numWinsCenters[percentileIndex] += 1
windowSumsCenter += sig
numWinsCenter += 1
numDomainsWithFeature += 1 if vals[d1.start: d1.stop].sum() > 0 else 0
print("Total boundaries : {}".format(numWindowsBoundary))
print("Total boundaries/w >= 1 feature : {}".format(numBoundariesWithFeature))
print("Fraction of boundaries/w feature : {}".format(float(numBoundariesWithFeature) / numWindowsBoundary))
print("Total domains : {}".format(numWinsCenter))
print("Total domains/w >= 1 feature : {}".format(numDomainsWithFeature))
print("Fraction of domains/w feature : {}".format(float(numDomainsWithFeature) / numWinsCenter))
windowSumsBoundary /= numWindowsBoundary
windowSumsCenter /= numWinsCenter
# for i in xrange(len(windowSumsCenters)):
# windowSumsCenters[i] /= numWinsCenters[i]
import scipy.integrate
areaUnderBoundarySignal = scipy.integrate.trapz(windowSumsBoundary)
areaUnderDomainSignal = scipy.integrate.trapz(windowSumsCenter)
areaDifference = areaUnderBoundarySignal - areaUnderDomainSignal
print("areaDifference = {}".format(areaDifference))
halfRes = res/2
sigLen = len(windowSumsBoundary)
boundaryPlotPoints = windowSumsBoundary[halfRes:sigLen+halfRes:res]
domainPlotPoints = windowSumsCenter[halfRes:sigLen+halfRes:res]
# percentileDomainPlotPoints = [windowSumsCenters[i][halfRes:sigLen+halfRes:res] for i in xrange(len(windowSumsCenters))]
font = {'family': 'normal',
'size': 18}
matplotlib.rc('font', **font)
plt.plot(boundaryPlotPoints, linewidth=4, label="boundaries")
plt.plot(domainPlotPoints, linewidth=4)
# for i, pdp in enumerate(percentileDomainPlotPoints):
# plt.plot(pdp, linewidth=4, label="d_len <= {}".format(dlpercentiles[i]))
# plt.legend()
plt.gca().yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(prune='lower'))
plt.ylabel("Average # Peaks per 10kb")
numPlotPoints = len(boundaryPlotPoints)
plt.xticks([0, numPlotPoints / 2, numPlotPoints], ["-500kb", "boundary", "500kb"])
dname = opts['--domains'].split(os.path.sep)[-1]
fname = opts['--feature'].split(os.path.sep)[-1]
plt.gcf().set_size_inches(10, 9)
plt.savefig('{}_{}.pdf'.format(dname, fname))
plt.show()
if __name__ == "__main__":
opts = docopt(__doc__, version="AnalyzeFeatures v1.0")
main(opts)
| gpl-3.0 |
lkloh/aimbat-lite | src/pysmo/aimbat/pickphase.py | 2 | 29237 | #!/usr/bin/env python
#------------------------------------------------
# Filename: pickphase.py
# Author: Xiaoting Lou
# Email: [email protected]
#
# Copyright (c) 2009-2012 Xiaoting Lou
#------------------------------------------------
"""
Python module to plot and pick phases (SAC PPK) on seismograms in a single axes.
Differences from plotphase.py:
* User interaction: set time picks and time window
* Plot: always plot time picks
* Plot: always use integer numbers (plot within +/-0.5) as ybases,
but not dist/az/baz (even when sorted by d/a/b)
* Plot: can plot seismograms in multiple pages (page navigation).
* Normalization: can normalize within time window
Keyboard and mouse actions:
* Click mouse to select a span to zoom in seismograms.
* Press 'z' to go back to last window span.
* Press 'w' to save the current xlimit as time window.
* Press 't[0-9]' to set time picks like SAC PPK.
Program structure:
PickPhaseMenu
||
PickPhase Button Prev + Button Next + Button Save + Button Quit
:copyright:
Xiaoting Lou
:license:
GNU General Public License, Version 3 (GPLv3)
http://www.gnu.org/licenses/gpl.html
"""
from pylab import *
import sys
from matplotlib.widgets import Button
from matplotlib import transforms
from matplotlib.font_manager import FontProperties
from ttconfig import PPConfig, getParser
from qualsort import initQual, seleSeis, sortSeisQual, sortSeisHeader
from sacpickle import loadData, saveData
from plotutils import TimeSelector, dataNorm, axLimit, pickLegend, indexBaseTick
import filtering as ftr
from scipy import signal
import tkMessageBox
def getOptions():
""" Parse arguments and options. """
parser = getParser()
maxsel = 25
maxdel = 5
maxnum = maxsel, maxdel
sortby = 'i'
parser.set_defaults(maxnum=maxnum)
parser.set_defaults(sortby=sortby)
parser.add_option('-b', '--boundlines', action="store_true", dest='boundlines_on',
help='Plot bounding lines to separate seismograms.')
parser.add_option('-n', '--netsta', action="store_true", dest='nlab_on',
help='Label seismogram by net.sta code instead of SAC file name.')
parser.add_option('-m', '--maxnum', dest='maxnum', type='int', nargs=2,
help='Maximum number of selected and deleted seismograms to plot. Defaults: {0:d} and {1:d}.'.format(maxsel, maxdel))
parser.add_option('-s', '--sortby', type='str', dest='sortby',
help='Sort seismograms by i (file indices), or 0/1/2/3 (quality factor all/ccc/snr/coh), or a given header (az/baz/dist..). Append - for decrease order, otherwise increase. Default is {:s}.'.format(sortby))
opts, files = parser.parse_args(sys.argv[1:])
if len(files) == 0:
print(parser.usage)
sys.exit()
return opts, files
# ############################################################################### #
# #
# CLASS: PickPhase #
# #
# ############################################################################### #
class PickPhase:
"""
Plot one single seismogram with given attributes.
See self.on_press for options on setting time picks and time window.
"""
def __init__(self, sacdh, opts, axpp, ybase, color='b', linew=1, alpha=1):
self.sacdh = sacdh
self.opts = opts
self.axpp = axpp
self.ybase = ybase
self.color = color
self.linew = linew
self.alpha = alpha
self.makeTime()
if opts.twin_on:
self.plotWindow()
self.plotWave()
self.connect()
def makeTime(self):
"""
Create array x as time series and get reference time.
"""
sacdh = self.sacdh
b, npts, delta = sacdh.b, sacdh.npts, sacdh.delta
# Use linspace instead of arange to keep len(time) == npts.
# Arange give an extra point for large window.
#self.time = arange(b, b+npts*delta, delta)
self.time = linspace(b, b+(npts-1)*delta, npts)
reltime = self.opts.reltime
if reltime >= 0:
reftime = sacdh.thdrs[reltime]
if reftime == -12345.0:
out = 'Time pick T{0:d} is not defined in SAC file {1:s} of station {2:s}'
print(out.format(reltime, sacdh.filename, sacdh.netsta))
sys.exit()
else:
sacdh.reftime = reftime
else:
sacdh.reftime = 0.
def plotWave(self):
"""
Plot wiggled or filled waveform, which is normalized (if not stacking) and shifted to ybase.
Fill both plus and negative side of signal but with different transparency.
If opts.fill == 0: no fill.
If opts.fill > 0: alpha of negative side is a quarter of plus side.
If opts.fill < 0: alpha of plus side is a quarter of negative side.
"""
opts = self.opts
ybase = self.ybase
x = self.time - self.sacdh.reftime
d = self.sacdh.data
# filter time signal d
if hasattr(opts, 'filterParameters') and opts.filterParameters['apply']:
NYQ = 1.0/(2*opts.delta)
# make filter, default is bandpass
Wn = [opts.filterParameters['lowFreq']/NYQ, opts.filterParameters['highFreq']/NYQ]
B, A = signal.butter(opts.filterParameters['order'], Wn, analog=False, btype='bandpass')
if opts.filterParameters['band']=='lowpass':
Wn = opts.filterParameters['lowFreq']/NYQ
B, A = signal.butter(opts.filterParameters['order'], Wn, analog=False, btype='lowpass')
elif opts.filterParameters['band']=='highpass':
Wn = opts.filterParameters['highFreq']/NYQ
B, A = signal.butter(opts.filterParameters['order'], Wn, analog=False, btype='highpass')
d = signal.lfilter(B, A, d)
axpp = self.axpp
if self.opts.ynorm > 0:
# normalize data within time window
if self.opts.ynormtwin_on:
try:
indmin, indmax = searchsorted(self.time, self.twindow)
indmax = min(len(x)-1, indmax)
thisd = d[indmin:indmax+1]
dnorm = dataNorm(thisd)
except:
dnorm = dataNorm(d)
else:
dnorm = dataNorm(d)
dnorm = 1/dnorm*self.opts.ynorm*.5
else:
dnorm = 1
y = d * dnorm
# plot
self.ynorm = [dnorm,]
line, = axpp.plot(x, y+ybase, ls='-', color=self.color, lw=self.linew, alpha=self.alpha, picker=5)
self.lines = [line,]
if opts.fill == 0:
axpp.axhline(y=ybase, color='k', ls=':')
self.wvfills = []
else:
f = opts.fill
fplus, fnega, = [], []
for i in range(len(x)):
if f*y[i] > 0:
fplus.append(True)
fnega.append(False)
else:
fplus.append(False)
fnega.append(True)
wvfillplus = axpp.fill_between(x, ybase, y+ybase, where=fplus, color=self.color, alpha=self.alpha*0.6)
wvfillnega = axpp.fill_between(x, ybase, y+ybase, where=fnega, color=self.color, alpha=self.alpha*0.2)
self.wvfills = [wvfillplus, wvfillnega]
self.labelStation()
def labelStation(self):
""" label the seismogram with file name or net.sta
"""
axpp = self.axpp
sacdh = self.sacdh
if self.opts.nlab_on:
slab = '{0:<8s}'.format(sacdh.netsta)
else:
slab = sacdh.filename.split('/')[-1]
if self.opts.labelqual:
hdrcc, hdrsn, hdrco = self.opts.qheaders[:3]
cc = sacdh.gethdr(hdrcc)
sn = sacdh.gethdr(hdrsn)
co = sacdh.gethdr(hdrco)
slab += 'qual={0:4.2f}/{1:.1f}/{2:4.2f}'.format(cc, sn, co)
trans = transforms.blended_transform_factory(axpp.transAxes, axpp.transData)
font = FontProperties()
font.set_family('monospace')
self.stalabel = axpp.text(1.025, self.ybase, slab, transform=trans, va='center',
color=self.color, fontproperties=font)
def on_pick(self, event):
""" Click a seismogram to show file name.
"""
if not len(event.ind): return True
pick = False
for line in self.lines:
if event.artist == line:
pick = True
if not pick: return True
try:
print('Seismogram picked: {:s} '.format(self.sacdh.filename))
except AttributeError:
print('Not a SAC file')
self.sacdh.selected = not self.sacdh.selected
if self.sacdh.selected:
self.sacdh.sethdr(self.opts.hdrsel, 'True ')
else:
self.sacdh.sethdr(self.opts.hdrsel, 'False ')
self.changeColor()
def changeColor(self):
""" Change color of a seismogram based on selection status.
"""
if self.sacdh.selected:
col = self.opts.pppara.colorwave
else:
col = self.opts.pppara.colorwavedel
setp(self.stalabel, color=col)
setp(self.lines[0], color=col)
if self.wvfills != []:
setp(self.wvfills[0], color=col)
setp(self.wvfills[1], color=col)
self.axpp.figure.canvas.draw()
def changeBase(self, newbase):
""" Change ybase of a seismogram.
"""
setp(self.lines[0], ydata=newbase)
def plotWindow(self):
""" Plot time window (xmin,xmax) with color fill.
"""
axpp = self.axpp
sacdh = self.sacdh
twh0, twh1 = self.opts.pppara.twhdrs
self.twhdrs = twh0, twh1
tw0 = sacdh.gethdr(twh0)
tw1 = sacdh.gethdr(twh1)
if tw0 == -12345.0:
tw0 = self.time[0]
if tw1 == -12345.0:
tw1 = self.time[-1]
self.twindow = [tw0, tw1]
tw0 -= sacdh.reftime
tw1 -= sacdh.reftime
#ymin, ymax = axpp.get_ylim()
ymin, ymax = self.ybase-0.5, self.ybase+0.5
pppara = self.opts.pppara
a, col = pppara.alphatwfill, pppara.colortwfill
self.twfill, = axpp.fill([tw0,tw1,tw1,tw0],
[ymin,ymin,ymax,ymax], col, alpha=a, edgecolor=col)
def resetWindow(self):
""" Reset time window when a span is selected.
"""
tw, reftime = self.twindow, self.sacdh.reftime
tw0 = tw[0] - reftime
tw1 = tw[1] - reftime
xypoly = self.twfill.get_xy()
xypoly[0:5,0] = ones(5)*tw0
xypoly[1:3,0] = ones(2)*tw1
self.twfill.set_xy(xypoly)
def plotPicks(self):
""" Plot time picks at ybase +/- 0.5
"""
sacdh = self.sacdh
axpp = self.axpp
pppara = self.opts.pppara
npick = pppara.npick
cols = pppara.pickcolors
ncol = len(cols)
lss = pppara.pickstyles
thdrs = array(sacdh.thdrs) - sacdh.reftime
timepicks = [None]*npick
for i in range(npick):
tpk = thdrs[i]
ia = i%ncol
ib = i/ncol
col = cols[ia]
ls = lss[ib]
xx = [tpk, tpk]
yy = [self.ybase-.5, self.ybase+.5]
timepicks[i], = axpp.plot(xx, yy, color=col,ls=ls,lw=1.5)
self.timepicks = timepicks
def on_press(self, event):
"""
Key press event. Valid only if axpp contains event (within 0.5 from ybase).
Options:
--------
(1) t + digits 0-9: set a time pick in SAC header.
(2) w: set the current xlim() as time window.
"""
evkey = event.key
axpp = self.axpp
contains, attr = axpp.contains(event)
if not contains or evkey is None: return
if abs(event.ydata-self.ybase) > 0.5: return
opts = self.opts
sacdh = self.sacdh
twin_on = opts.twin_on
reftime = sacdh.reftime
evkey0 = self.evkeys[1]
self.evkeys = evkey0 + evkey
if evkey.lower() == 'w' and twin_on:
twh0, twh1 = self.twhdrs
xxlim = axpp.get_xlim()
tw0 = xxlim[0] + reftime
tw1 = xxlim[1] + reftime
sacdh.sethdr(twh0, tw0)
sacdh.sethdr(twh1, tw1)
self.twindow = [tw0, tw1]
out = 'File {:s}: set time window to {:s} and {:s}: {:6.1f} - {:6.1f} s'
print(out.format(sacdh.filename, twh0, twh1, tw0, tw1))
self.resetWindow()
elif evkey0.lower() == 't' and evkey.isdigit() and opts.pick_on:
timepicks = self.timepicks
ipk = 't' + evkey
ipick = int(evkey)
tpk = event.xdata
atpk = tpk + reftime
sacdh.thdrs[ipick] = atpk
out = 'File {:s}: pick phase {:s} = {:6.1f} s, absolute = {:6.1f} s. '
print(out.format(sacdh.filename, ipk, tpk, atpk))
timepicks[ipick].set_xdata(tpk)
axpp.figure.canvas.draw()
def updateY(self, xxlim):
""" Update ynorm for wave wiggle from given xlim.
"""
x = self.time - self.sacdh.reftime
d = self.sacdh.data
indmin, indmax = searchsorted(x, xxlim)
indmax = min(len(x)-1, indmax)
thisd = d[indmin:indmax+1]
if len(thisd) > 0 and self.opts.ynorm > 0:
dnorm = dataNorm(thisd)
dnorm = 1/dnorm*self.opts.ynorm*.5
else:
dnorm = self.ynorm[-1]
self.ynorm.append(dnorm)
setp(self.lines[0], ydata=self.ybase+d*dnorm)
def connect(self):
self.cidpick = self.axpp.figure.canvas.mpl_connect('pick_event', self.on_pick)
self.cidpress = self.axpp.figure.canvas.mpl_connect('key_press_event', self.on_press)
self.evkeys = 'xx'
def disconnect(self):
self.axpp.figure.canvas.mpl_disconnect(self.cidpick)
self.axpp.figure.canvas.mpl_disconnect(self.cidpress)
def disconnectPick(self):
self.axpp.figure.canvas.mpl_disconnect(self.cidpick)
# ############################################################################### #
# #
# CLASS: PickPhase #
# #
# ############################################################################### #
# ############################################################################### #
# #
# CLASS: PickPhaseMenu #
# #
# ############################################################################### #
class PickPhaseMenu():
"""
Plot a group of seismogram gathers.
Set up axes attributes.
Create Button Save to save SAC headers to files.
"""
def __init__(self, gsac, opts, axs):
self.gsac = gsac
self.opts = opts
self.axs = axs
self.axpp = axs['Seis']
self.initIndex()
self.plotSeis()
self.plotSpan()
self.connect()
pppara = opts.pppara
pickLegend(self.axpp, pppara.npick, pppara.pickcolors, pppara.pickstyles)
def plotSeis(self):
self.plotWave()
self.setLimits()
self.setLabels()
if self.opts.pick_on:
self.plotPicks()
self.labelSelection()
def initIndex(self):
""" Initialize indices for page navigation.
"""
opts = self.opts
axs = self.axs
selist = self.gsac.selist
delist = self.gsac.delist
nsel = len(selist)
ndel = len(delist)
maxsel, maxdel = opts.maxnum
pagesize = maxsel + maxdel
aipages, ayindex, aybases, ayticks = indexBaseTick(nsel, ndel, pagesize, maxsel)
self.aipages = aipages
self.ayindex = ayindex
self.aybases = aybases
self.ayticks = ayticks
self.sedelist = [selist, delist]
self.ipage = 0
def plotWave(self):
""" Plot waveforms for this page.
"""
opts = self.opts
axpp = self.axpp
ipage = self.ipage
ayindex, aybases, ayticks = self.ayindex, self.aybases, self.ayticks
sedelist = self.sedelist
plists = [ [ sedelist[j][k] for k in ayindex[ipage][j] ] for j in range(2)]
pbases = [ [ k for k in aybases[ipage][j] ] for j in range(2)]
pticks = [ [ k for k in ayticks[ipage][j] ] for j in range(2)]
npsel = len(pbases[0])
npdel = len(pbases[1])
nsede = [npsel, npdel]
# get colors from sacdh.selected
colsel = opts.pppara.colorwave
coldel = opts.pppara.colorwavedel
colors = [[None,] * npsel , [None,] * npdel]
for j in range(2):
for k in range(nsede[j]):
if plists[j][k].selected:
colors[j][k] = colsel
else:
colors[j][k] = coldel
# plot
pps = []
for j in range(2):
nsd = nsede[j]
for k in range(nsd):
linews = ones(nsd)
alphas = ones(nsd)
pp = PickPhase(plists[j][k], opts, axpp, pbases[j][k], colors[j][k])
pps.append(pp)
self.pps = pps
self.ybases = pbases[0] + pbases[1]
self.yticks = pticks[0] + pticks[1]
abases = pbases[1]+pbases[0]
self.azylim = abases[-1]-1, abases[0]+1
def replot(self, ipage):
""" Finish plotting of current page and move to prev/next.
"""
self.ipage = ipage
if not self.ipage in self.aipages:
print ('End of page.')
return
self.finish()
self.plotSeis()
def on_select(self, xmin, xmax):
""" Mouse event: select span. """
if self.span.visible:
print('span selected: %6.1f %6.1f ' % (xmin, xmax))
xxlim = (xmin, xmax)
self.axpp.set_xlim(xxlim)
self.xzoom.append(xxlim)
if self.opts.upylim_on:
print ('upylim')
for pp in self.pps: pp.updateY(xxlim)
self.axpp.figure.canvas.draw()
# change window size in seismograms plot here
def plotSpan(self):
""" Create a SpanSelector for zoom in and zoom out.
"""
pppara = self.opts.pppara
a, col = pppara.alphatwsele, pppara.colortwsele
mspan = pppara.minspan * self.opts.delta
self.span = TimeSelector(self.axpp, self.on_select, 'horizontal', minspan=mspan, useblit=False,
rectprops=dict(alpha=a, facecolor=col))
def on_zoom(self, event):
""" Zoom back to previous xlim when event is in event.inaxes.
"""
evkey = event.key
axpp = self.axpp
if not axpp.contains(event)[0] or evkey is None: return
xzoom = self.xzoom
if evkey.lower() == 'z' and len(xzoom) > 1:
del xzoom[-1]
axpp.set_xlim(xzoom[-1])
print('Zoom back to: %6.1f %6.1f ' % tuple(xzoom[-1]))
if self.opts.upylim_on:
for pp in self.pps:
del pp.ynorm[-1]
setp(pp.lines[0], ydata=pp.ybase+pp.sacdh.data*pp.ynorm[-1])
axpp.figure.canvas.draw()
def plotPicks(self):
for pp in self.pps:
pp.plotPicks()
pppara = self.opts.pppara
def setLabels(self):
""" Set axes labels and page label"""
axpp = self.axpp
axpp.set_yticks(self.ybases)
axpp.set_yticklabels(self.yticks)
axpp.set_ylabel('Trace Number')
axpp.axhline(y=0, lw=2, color='r')
if self.opts.boundlines_on:
for yy in range(self.azylim[0], self.azylim[1]):
axpp.axhline(y=yy+0.5, color='black')
reltime = self.opts.reltime
if reltime >= 0:
axpp.set_xlabel('Time - T%d [s]' % reltime)
else:
axpp.set_xlabel('Time [s]')
trans = transforms.blended_transform_factory(axpp.transAxes, axpp.transAxes)
page = 'Page {0:d} of [{1:d},{2:d}]'.format(self.ipage, self.aipages[0], self.aipages[-1])
self.pagelabel = axpp.text(1, -0.02, page, transform=trans, va='top', ha='right')
def setLimits(self):
""" Set axes limits """
axpp = self.axpp
self.getXLimit()
axpp.set_xlim(self.xzoom[0])
axpp.set_ylim(self.azylim)
# plot time zero lines and set axis limit
axpp.axvline(x=0, color='k', ls=':')
if self.opts.xlimit is not None:
axpp.set_xlim(self.opts.xlimit)
def labelSelection(self):
""" Label selection status with transform (transAxes, transData).
"""
axpp = self.axpp
trans = transforms.blended_transform_factory(axpp.transAxes, axpp.transData)
colsel = self.opts.pppara.colorwave
coldel = self.opts.pppara.colorwavedel
axpp.annotate('Selected', xy=(1.015, self.azylim[0]), xycoords=trans, xytext=(1.03, -0.17),
size=10, va='top', color=colsel,
bbox=dict(boxstyle="round,pad=.2", fc='w', ec=(1,.5,.5)),
arrowprops=dict(arrowstyle="->",connectionstyle="angle,angleA=0,angleB=-90,rad=20",color=colsel, lw=2),)
axpp.annotate('Deselected', xy=(1.015, self.azylim[1]), xycoords=trans, xytext=(1.03, 0.17),
size=10, va='bottom', color=coldel,
bbox=dict(boxstyle="round,pad=.2", fc='w', ec=(1,.5,.5)),
arrowprops=dict(arrowstyle="->",connectionstyle="angle,angleA=0,angleB=-90,rad=20",color=coldel, lw=2),)
def getXLimit(self):
""" Get x limit (relative to reference time) """
pps = self.pps
b = [ pp.time[0] - pp.sacdh.reftime for pp in pps ]
e = [ pp.time[-1] - pp.sacdh.reftime for pp in pps ]
npts = [ len(pp.time) for pp in pps ]
self.bmin = min(b)
self.bmax = max(b)
self.emin = min(e)
self.emax = max(e)
mm = self.bmin, self.emax
xxlim = axLimit(mm)
self.xzoom = [xxlim,]
def getYLimit(self):
""" Get y limit """
saclist = self.gsac.saclist
delta = saclist[0].delta
data = array([ [min(sacdh.data), max(sacdh.data) ] for sacdh in saclist ])
self.dmin = data[:,0].min()
self.dmax = data[:,1].max()
def fron(self, event):
self.bnfron.label.set_text('Wait...')
self.axpp.get_figure().canvas.draw()
self.replot(0)
self.bnfron.label.set_text('Front')
self.axpp.get_figure().canvas.draw()
# zoom back to original screen size
def zoba(self, event):
self.bnzoba.label.set_text('Wait...')
self.axpp.get_figure().canvas.draw()
self.replot(self.ipage)
self.bnzoba.label.set_text('Zoom\nBack')
self.axpp.get_figure().canvas.draw()
def prev(self, event):
self.bnprev.label.set_text('Wait...')
self.axpp.get_figure().canvas.draw()
self.replot(self.ipage-1)
self.bnprev.label.set_text('Prev')
self.axpp.get_figure().canvas.draw()
def next(self, event):
self.bnnext.label.set_text('Wait...')
self.axpp.get_figure().canvas.draw()
self.replot(self.ipage+1)
self.bnnext.label.set_text('Next')
self.axpp.get_figure().canvas.draw()
# ---------------------------- SAVE HEADERS FILES ------------------------------- #
def save(self, event):
self.getSaveAxes()
self.save_connect()
def getSaveAxes(self):
saveFigure = figure(figsize=(8,1))
saveFigure.clf()
# size of save buttons
rect_saveHeaders = [0.04,0.2,0.2,0.6]
rect_saveHeadersFilterParams = [0.28,0.2,0.2,0.6]
rect_saveHeadersOverride = [0.52,0.2,0.2,0.6]
rect_saveQuit = [0.76,0.2,0.2,0.6]
#initalize axes
saveAxs = {}
saveAxs['saveHeaders'] = saveFigure.add_axes(rect_saveHeaders)
saveAxs['saveHeadersFilterParams'] = saveFigure.add_axes(rect_saveHeadersFilterParams)
saveAxs['saveHeadersOverride'] = saveFigure.add_axes(rect_saveHeadersOverride)
saveAxs['saveQuit'] = saveFigure.add_axes(rect_saveQuit)
self.saveAxs = saveAxs
self.saveFigure = saveFigure
self.save_connect()
show()
def save_connect(self):
#set buttons
self.bn_saveHeaders = Button(self.saveAxs['saveHeaders'], 'Save\nHeaders\nOnly')
self.bn_saveHeadersFilterParams = Button(self.saveAxs['saveHeadersFilterParams'], 'Save Headers &\n Filter Parameters')
self.bn_saveHeadersOverride = Button(self.saveAxs['saveHeadersOverride'], 'Save Headers &\nOverride Data')
self.bn_saveQuit = Button(self.saveAxs['saveQuit'], 'Quit')
#connect buttons to functions they trigger
self.cid_saveHeaders = self.bn_saveHeaders.on_clicked(self.save_headers)
self.cid_savedHeadersFilterParams = self.bn_saveHeadersFilterParams.on_clicked(self.save_headers_filterParams)
self.cid_saveHeadersOverride = self.bn_saveHeadersOverride.on_clicked(self.save_headers_override)
self.cid_saveQuit = self.bn_saveQuit.on_clicked(self.save_quit)
def save_quit(self, event):
self.save_disconnect()
close(self.saveFigure)
def save_disconnect(self):
self.bn_saveHeaders.disconnect(self.cid_saveHeaders)
self.bn_saveHeadersFilterParams.disconnect(self.cid_savedHeadersFilterParams)
self.bn_saveHeadersOverride.disconnect(self.cid_saveHeadersOverride)
# self.saveAxs['saveHeaders'].cla()
# self.saveAxs['saveHeadersOverride'].cla()
# self.saveAxs['saveQuit'].cla()
"""save headers only"""
def save_headers(self, event):
saveData(self.gsac, self.opts)
"""save headers and override data with filtered data
@lowFreq -> user0
@highFreq -> user1
@band -> kuser0
@order -> kuser1, need to convert to integer form alphanumeric
"""
def save_headers_filterParams(self, event):
# write params to file
for sacdh in self.gsac.saclist:
sacdh.user0 = self.opts.filterParameters['lowFreq']
sacdh.user1 = self.opts.filterParameters['highFreq']
sacdh.kuser0 = self.opts.filterParameters['band']
sacdh.kuser1 = self.opts.filterParameters['order']
if 'stkdh' in self.gsac.__dict__:
self.gsac.stkdh.user0 = self.opts.filterParameters['lowFreq']
self.gsac.stkdh.user1 = self.opts.filterParameters['highFreq']
self.gsac.stkdh.kuser0 = self.opts.filterParameters['band']
self.gsac.stkdh.kuser1 = self.opts.filterParameters['order']
# save
saveData(self.gsac, self.opts)
def save_headers_override(self, event):
shouldRun = tkMessageBox.askokcancel("Will Override Files!","This will override the data in your files with the filtered data. \nAre you sure?")
if shouldRun:
for sacdh in self.gsac.saclist:
sacdh.data = ftr.filtering_time_signal(sacdh.data, self.opts.delta, self.opts.filterParameters['lowFreq'], self.opts.filterParameters['highFreq'], self.opts.filterParameters['band'], self.opts.filterParameters['order'])
if 'stkdh' in self.gsac.__dict__:
self.gsac.stkdh.data = ftr.filtering_time_signal(self.gsac.stkdh.data, self.opts.delta, self.opts.filterParameters['lowFreq'], self.opts.filterParameters['highFreq'], self.opts.filterParameters['band'], self.opts.filterParameters['order'])
saveData(self.gsac, self.opts)
# ---------------------------- SAVE HEADERS FILES ------------------------------- #
def quit(self, event):
self.finish()
self.disconnect(event.canvas)
close('all')
def connect(self):
self.axfron = self.axs['Fron']
self.axprev = self.axs['Prev']
self.axnext = self.axs['Next']
self.axzoba = self.axs['Zoba']
self.axsave = self.axs['Save']
self.axquit = self.axs['Quit']
self.bnfron = Button(self.axfron, 'Front')
self.bnprev = Button(self.axprev, 'Prev')
self.bnnext = Button(self.axnext, 'Next')
self.bnzoba = Button(self.axzoba, 'Zoom \n Back')
self.bnsave = Button(self.axsave, 'Save')
self.bnquit = Button(self.axquit, 'Quit')
self.cidfron = self.bnfron.on_clicked(self.fron)
self.cidprev = self.bnprev.on_clicked(self.prev)
self.cidnext = self.bnnext.on_clicked(self.next)
self.cidzoba = self.bnzoba.on_clicked(self.zoba)
self.cidsave = self.bnsave.on_clicked(self.save)
self.cidquit = self.bnquit.on_clicked(self.quit)
self.cidpress = self.axpp.figure.canvas.mpl_connect('key_press_event', self.on_zoom)
def disconnect(self, canvas):
# self.bnfron.disconnect(self.cidfron)
# self.bnprev.disconnect(self.cidprev)
# self.bnnext.disconnect(self.cidnext)
# self.bnzoba.disconnect(self.cidzoba)
# self.bnsave.disconnect(self.cidsave)
self.axfron.cla()
self.axprev.cla()
self.axnext.cla()
self.axzoba.cla()
self.axsave.cla()
self.axquit.cla()
canvas.mpl_disconnect(self.cidpress)
self.span.visible = False
def finish(self):
for pp in self.pps:
pp.disconnect()
self.axpp.cla()
# ############################################################################### #
# #
# CLASS: PickPhaseMenu #
# #
# ############################################################################### #
# ############################################################################### #
# #
# SortSeis #
# #
# ############################################################################### #
def sortSeis(gsac, opts):
'Sort seismograms by file indices, quality factors, or a given header'
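    # Example opts.sortby values handled below (see the -s/--sortby help in getOptions):
    #   'i'                                        -> keep original file order
    #   '0'/'all', '1'/'ccc', '2'/'snr', '3'/'coh' -> sort by quality factor(s)
    #   'az', 'baz', 'dist', ...                   -> sort by that SAC header
    # Appending '-' (e.g. 'az-') sorts in decreasing instead of increasing order.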
sortby = opts.sortby
# determine increase/decrease order
if sortby[-1] == '-':
sortincrease = False
sortby = sortby[:-1]
else:
sortincrease = True
opts.labelqual = False
# sort
if sortby == 'i': # by file indices
gsac.selist, gsac.delist = seleSeis(gsac.saclist)
elif sortby.isdigit() or sortby in opts.qheaders + ['all',]: # by quality factors
opts.labelqual = True
if sortby == '1' or sortby == 'ccc':
opts.qweights = [1, 0, 0]
elif sortby == '2' or sortby == 'snr':
opts.qweights = [0, 1, 0]
elif sortby == '3' or sortby == 'coh':
opts.qweights = [0, 0, 1]
gsac.selist, gsac.delist = sortSeisQual(gsac.saclist, opts.qheaders, opts.qweights, opts.qfactors, sortincrease)
else: # by a given header
gsac.selist, gsac.delist = sortSeisHeader(gsac.saclist, sortby, sortincrease)
return
# ############################################################################### #
# #
# SortSeis #
# #
# ############################################################################### #
def getAxes(opts):
'Get axes for plotting'
fig = figure(figsize=(13, 11))
rcParams['legend.fontsize'] = 11
if opts.labelqual:
rectseis = [0.1, 0.06, 0.65, 0.85]
else:
rectseis = [0.1, 0.06, 0.75, 0.85]
axpp = fig.add_axes(rectseis)
axs = {}
axs['Seis'] = axpp
dx = 0.07
x0 = rectseis[0] + rectseis[2] + 0.01
xq = x0 - dx*1
xs = x0 - dx*2
xn = x0 - dx*3
xp = x0 - dx*4
rectprev = [xp, 0.93, 0.06, 0.04]
rectnext = [xn, 0.93, 0.06, 0.04]
rectsave = [xs, 0.93, 0.06, 0.04]
rectquit = [xq, 0.93, 0.06, 0.04]
axs['Prev'] = fig.add_axes(rectprev)
axs['Next'] = fig.add_axes(rectnext)
axs['Save'] = fig.add_axes(rectsave)
axs['Quit'] = fig.add_axes(rectquit)
return axs
def getDataOpts():
'Get SAC Data and Options'
opts, ifiles = getOptions()
pppara = PPConfig()
gsac = loadData(ifiles, opts, pppara)
opts.pppara = pppara
opts.qheaders = pppara.qheaders
opts.qfactors = pppara.qfactors
opts.qweights = pppara.qweights
opts.hdrsel = pppara.hdrsel
opts.pick_on = True
initQual(gsac.saclist, opts.hdrsel, opts.qheaders)
sortSeis(gsac, opts)
return gsac, opts
def main():
gsac, opts = getDataOpts()
axs = getAxes(opts)
ppm = PickPhaseMenu(gsac, opts, axs)
if __name__ == "__main__":
main()
show()
| gpl-3.0 |
nik7273/computational-medical-knowledge | src/listFreqs.py | 2 | 1765 | # -*- coding: utf-8 -*-
"Get frequencies and plot them"
from pdfParser import pdfparser
import nltk, matplotlib, numpy, pylab, string, codecs
from plotSave import plot_and_save
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
stop = stopwords.words('english')
READ = 'rb'
WRITE = 'wb'
lemma = nltk.WordNetLemmatizer()
punkt = set(string.punctuation)
d={}
with open("abbreviation_reference.txt", READ) as f:
    d = dict(line.strip().split(' ', 1) for line in f if line.strip())
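# Assumed layout of abbreviation_reference.txt (not included here): one
# "abbreviation expansion" pair per line, with multi-word expansions joined
# by underscores, e.g.
#   bp blood_pressure
#   hr heart_rate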
measures = ['cm', 'in', 'mg', 'lb', 'kg', 'mm', 'ft']
def fromPDFtoText(infile, outfile):
if ".pdf" in infile:
with codecs.open(outfile,WRITE,'utf-8') as outfile:
print>>outfile,pdfparser(infile)
return outfile
else:
return infile
def getAndListFreqs(textfile, wordsFile, listFile):
#gets frequencies of words and lists them in outer file
data = [word.lower() for word in word_tokenize(codecs.open(textfile,READ,'utf-8').read()) if word not in punkt]
# Expand known abbreviations; multi-word expansions in the reference file
# join words with '_', which is turned back into a space before tokenizing.
expanded = []
for item in data:
    if item in d:
        expanded.extend(word_tokenize(d[item].replace("_", " ")))
    else:
        expanded.append(item)
data = expanded
data = [lemma.lemmatize(word) for word in data]
data = [word for word in data if word not in stop]
data = [word for word in data if word not in measures]
with codecs.open(wordsFile,WRITE,'utf-8') as outfile:
for word in data:
print>>outfile,word
distribution = nltk.FreqDist(codecs.open(wordsFile, READ).read().splitlines())
commonWord = distribution.most_common(30)
words,freqs = zip(*commonWord)
with codecs.open(listFile,WRITE,'utf-8') as outfile:
for x,y in commonWord:
print>>outfile, "%s %s" % (y, x)
return {"words": words, "freqs": freqs}
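# Minimal usage sketch (the file names below are only placeholders):
#   fromPDFtoText("report.pdf", "report.txt")                 # dump the PDF text
#   getAndListFreqs("report.txt", "words.txt", "top30.txt")   # lemmatized words + top-30 counts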
| apache-2.0 |
fweik/espresso | samples/dancing.py | 3 | 2760 | #
# Copyright (C) 2019-2020 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Stokesian Dynamics simulation of particle sedimentation.
Reproduce the trajectory in Figure 5b from :cite:`durlofsky87a`.
"""
import espressomd
import espressomd.constraints
import espressomd.observables
import espressomd.accumulators
import numpy as np
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(epilog=__doc__)
group = parser.add_mutually_exclusive_group()
group.add_argument('--ft', action='store_true', help='Use FT approximation')
group.add_argument('--fts', action='store_true', help='Use FTS approximation')
args = parser.parse_args()
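# Typical invocations (a sketch; run through ESPResSo's pypresso wrapper from the
# samples/ directory so that ../testsuite/python/data/dancing.txt resolves):
#   pypresso dancing.py          # FTS approximation (default)
#   pypresso dancing.py --ft     # cheaper FT approximation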
if args.ft:
print("Using FT approximation method")
sd_method = "ft"
else:
print("Using FTS approximation method")
sd_method = "fts"
espressomd.assert_features("STOKESIAN_DYNAMICS")
system = espressomd.System(box_l=[10, 10, 10])
system.time_step = 1.5
system.cell_system.skin = 0.4
system.periodicity = [False, False, False]
system.integrator.set_stokesian_dynamics(
viscosity=1.0, radii={0: 1.0}, approximation_method=sd_method)
system.part.add(pos=[-5, 0, 0], rotation=[1, 1, 1])
system.part.add(pos=[0, 0, 0], rotation=[1, 1, 1])
system.part.add(pos=[7, 0, 0], rotation=[1, 1, 1])
gravity = espressomd.constraints.Gravity(g=[0, -1, 0])
system.constraints.add(gravity)
obs = espressomd.observables.ParticlePositions(ids=system.part[:].id)
acc = espressomd.accumulators.TimeSeries(obs=obs, delta_N=1)
system.auto_update_accumulators.add(acc)
acc.update()
intsteps = int(10500 / system.time_step)
system.integrator.run(intsteps)
positions = acc.time_series()
ref_data = "../testsuite/python/data/dancing.txt"
data = np.loadtxt(ref_data)
for i in range(3):
plt.plot(positions[:, i, 0], positions[:, i, 1], linestyle='solid')
plt.gca().set_prop_cycle(None)
for i in range(0, 6, 2):
plt.plot(data[:, i], data[:, i + 1], linestyle='dashed')
plt.title("Trajectory of sedimenting spheres\nsolid line: simulation "
"({}), dashed line: paper (FTS)".format(sd_method.upper()))
plt.xlabel("x")
plt.ylabel("y")
plt.show()
| gpl-3.0 |
yask123/scikit-learn | sklearn/tree/tests/test_tree.py | 48 | 47506 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occur at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
# Check that tree estimator are pickable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
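# Illustrative sketch (not part of the original suite): when both constraints
# are supplied, max_leaf_nodes takes precedence over max_depth, so the fitted
# tree may grow deeper than max_depth; names below are demonstration only.
def _example_max_leaf_nodes_precedence():
    from sklearn import datasets
    from sklearn.tree import DecisionTreeClassifier
    X_demo, y_demo = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    est = DecisionTreeClassifier(max_depth=1, max_leaf_nodes=4)
    est.fit(X_demo, y_demo)
    # The resulting depth is expected to exceed the nominal max_depth of 1.
    return est.tree_.max_depth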
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that the error message for too-large inputs mentions the float32 limit.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Save testing time by subsampling the larger datasets
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximum depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
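# Illustrative sketch (not part of the original suite): the dense/sparse
# equivalence exercised above amounts to fitting the same estimator on X and
# on csr_matrix(X) and comparing predictions; a hedged example, not a test.
def _example_sparse_dense_equivalence():
    import numpy as np
    from scipy.sparse import csr_matrix
    from sklearn.tree import DecisionTreeClassifier
    rng = np.random.RandomState(0)
    X_demo = rng.randn(40, 3)
    y_demo = (X_demo[:, 0] > 0).astype(int)
    dense = DecisionTreeClassifier(random_state=0).fit(X_demo, y_demo)
    sparse = DecisionTreeClassifier(random_state=0).fit(csr_matrix(X_demo),
                                                        y_demo)
    # Both models are expected to produce identical predictions.
    return np.array_equal(dense.predict(X_demo), sparse.predict(X_demo))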
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
louispotok/pandas | pandas/tests/indexes/test_base.py | 1 | 95450 | # -*- coding: utf-8 -*-
import pytest
from datetime import datetime, timedelta
from collections import defaultdict
import pandas.util.testing as tm
from pandas.core.dtypes.generic import ABCIndex
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas.core.indexes.api import Index, MultiIndex
from pandas.tests.indexes.common import Base
from pandas.compat import (range, lrange, lzip, u,
text_type, zip, PY3, PY35, PY36, PYPY, StringIO)
import operator
import numpy as np
from pandas import (period_range, date_range, Series,
DataFrame, Float64Index, Int64Index, UInt64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex,
PeriodIndex, RangeIndex, isna)
from pandas.core.index import _get_combined_index, _ensure_index_from_sequences
from pandas.util.testing import assert_almost_equal
from pandas.compat.numpy import np_datetime64_compat
import pandas.core.config as cf
from pandas.core.indexes.datetimes import _to_m8
import pandas as pd
from pandas._libs.tslib import Timestamp
class TestIndex(Base):
_holder = Index
def setup_method(self, method):
self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),
strIndex=tm.makeStringIndex(100),
dateIndex=tm.makeDateIndex(100),
periodIndex=tm.makePeriodIndex(100),
tdIndex=tm.makeTimedeltaIndex(100),
intIndex=tm.makeIntIndex(100),
uintIndex=tm.makeUIntIndex(100),
rangeIndex=tm.makeRangeIndex(100),
floatIndex=tm.makeFloatIndex(100),
boolIndex=Index([True, False]),
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
['foo', 'bar', 'baz'], [1, 2, 3])),
repeats=Index([0, 0, 1, 1, 2, 2]))
self.setup_indices()
def create_index(self):
return Index(list('abcde'))
def generate_index_types(self, skip_index_keys=[]):
"""
Return a generator of the various index types, leaving
out the ones with a key in skip_index_keys
"""
for key, index in self.indices.items():
if key not in skip_index_keys:
yield key, index
def test_can_hold_identifiers(self):
index = self.create_index()
key = index[0]
assert index._can_hold_identifiers_and_holds_name(key) is True
def test_new_axis(self):
new_index = self.dateIndex[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self, indices):
super(TestIndex, self).test_copy_and_deepcopy(indices)
new_copy2 = self.intIndex.copy(dtype=int)
assert new_copy2.dtype.kind == 'i'
@pytest.mark.parametrize("attr", ['strIndex', 'dateIndex'])
def test_constructor_regular(self, attr):
# regular instance creation
index = getattr(self, attr)
tm.assert_contains_all(index, index)
def test_constructor_casting(self):
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
tm.assert_index_equal(self.strIndex, index)
def test_constructor_copy(self):
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
assert isinstance(index, Index)
assert index.name == 'name'
tm.assert_numpy_array_equal(arr, index.values)
arr[0] = "SOMEBIGLONGSTRING"
assert index[0] != "SOMEBIGLONGSTRING"
# what to do here?
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
pytest.raises(TypeError, Index, 0)
@pytest.mark.parametrize("index_vals", [
[('A', 1), 'B'], ['B', ('A', 1)]])
def test_construction_list_mixed_tuples(self, index_vals):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
index = Index(index_vals)
assert isinstance(index, Index)
assert not isinstance(index, MultiIndex)
@pytest.mark.parametrize('na_value', [None, np.nan])
@pytest.mark.parametrize('vtype', [list, tuple, iter])
def test_construction_list_tuples_nan(self, na_value, vtype):
# GH 18505 : valid tuples containing NaN
values = [(1, 'two'), (3., na_value)]
result = Index(vtype(values))
expected = MultiIndex.from_tuples(values)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cast_as_obj", [True, False])
@pytest.mark.parametrize("index", [
pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern'), # DTI with tz
pd.date_range('2015-01-01 10:00', freq='D', periods=3), # DTI no tz
pd.timedelta_range('1 days', freq='D', periods=3), # td
pd.period_range('2015-01-01', freq='D', periods=3) # period
])
def test_constructor_from_index_dtlike(self, cast_as_obj, index):
if cast_as_obj:
result = pd.Index(index.astype(object))
else:
result = pd.Index(index)
tm.assert_index_equal(result, index)
if isinstance(index, pd.DatetimeIndex) and hasattr(index, 'tz'):
assert result.tz == index.tz
@pytest.mark.parametrize("index,has_tz", [
(pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern'), True), # datetimetz
(pd.timedelta_range('1 days', freq='D', periods=3), False), # td
(pd.period_range('2015-01-01', freq='D', periods=3), False) # period
])
def test_constructor_from_series_dtlike(self, index, has_tz):
result = pd.Index(pd.Series(index))
tm.assert_index_equal(result, index)
if has_tz:
assert result.tz == index.tz
@pytest.mark.parametrize("klass", [Index, DatetimeIndex])
def test_constructor_from_series(self, klass):
expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
result = klass(s)
tm.assert_index_equal(result, expected)
def test_constructor_from_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
s = Series(pd.to_datetime(dts))
result = DatetimeIndex(s, freq='MS')
tm.assert_index_equal(result, expected)
def test_constructor_from_frame_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = dts
result = DatetimeIndex(df['date'], freq='MS')
assert df['date'].dtype == object
expected.name = 'date'
tm.assert_index_equal(result, expected)
expected = pd.Series(dts, name='date')
tm.assert_series_equal(df['date'], expected)
# GH 6274
# infer freq of same
freq = pd.infer_freq(df['date'])
assert freq == 'MS'
@pytest.mark.parametrize("array", [
np.arange(5), np.array(['a', 'b', 'c']), date_range(
'2000-01-01', periods=3).values
])
def test_constructor_ndarray_like(self, array):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('dtype', [
int, 'int64', 'int32', 'int16', 'int8', 'uint64', 'uint32',
'uint16', 'uint8'])
def test_constructor_int_dtype_float(self, dtype):
# GH 18400
if is_unsigned_integer_dtype(dtype):
index_type = UInt64Index
else:
index_type = Int64Index
expected = index_type([0, 1, 2, 3])
result = Index([0., 1., 2., 3.], dtype=dtype)
tm.assert_index_equal(result, expected)
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
expected = Float64Index(data)
result = Index(data, dtype='float')
tm.assert_index_equal(result, expected)
def test_droplevel(self, indices):
# GH 21115
if isinstance(indices, MultiIndex):
# Tested separately in test_multi.py
return
assert indices.droplevel([]).equals(indices)
for level in indices.name, [indices.name]:
if isinstance(indices.name, tuple) and level is indices.name:
# GH 21121 : droplevel with tuple name
continue
with pytest.raises(ValueError):
indices.droplevel(level)
for level in 'wrong', ['wrong']:
with pytest.raises(KeyError):
indices.droplevel(level)
@pytest.mark.parametrize("dtype", ['int64', 'uint64'])
def test_constructor_int_dtype_nan_raises(self, dtype):
# see gh-15187
data = [np.nan]
msg = "cannot convert"
with tm.assert_raises_regex(ValueError, msg):
Index(data, dtype=dtype)
@pytest.mark.parametrize("klass,dtype,na_val", [
(pd.Float64Index, np.float64, np.nan),
(pd.DatetimeIndex, 'datetime64[ns]', pd.NaT)
])
def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):
# GH 13467
na_list = [na_val, na_val]
expected = klass(na_list)
assert expected.dtype == dtype
result = Index(na_list)
tm.assert_index_equal(result, expected)
result = Index(np.array(na_list))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos", [0, 1])
@pytest.mark.parametrize("klass,dtype,ctor", [
(pd.DatetimeIndex, 'datetime64[ns]', np.datetime64('nat')),
(pd.TimedeltaIndex, 'timedelta64[ns]', np.timedelta64('nat'))
])
def test_index_ctor_infer_nat_dt_like(self, pos, klass, dtype, ctor,
nulls_fixture):
expected = klass([pd.NaT, pd.NaT])
assert expected.dtype == dtype
data = [ctor]
data.insert(pos, nulls_fixture)
result = Index(data)
tm.assert_index_equal(result, expected)
result = Index(np.array(data, dtype=object))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("swap_objs", [True, False])
def test_index_ctor_nat_result(self, swap_objs):
# mixed np.datetime64/timedelta64 nat results in object
data = [np.datetime64('nat'), np.timedelta64('nat')]
if swap_objs:
data = data[::-1]
expected = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), expected)
tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
tm.assert_index_equal(rs, xp)
assert isinstance(rs, PeriodIndex)
@pytest.mark.parametrize("vals,dtype", [
([1, 2, 3, 4, 5], 'int'), ([1.1, np.nan, 2.2, 3.0], 'float'),
(['A', 'B', 'C', np.nan], 'obj')
])
def test_constructor_simple_new(self, vals, dtype):
index = Index(vals, name=dtype)
result = index._simple_new(index, dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3]), np.array([1, 2, 3], dtype=int),
# below should coerce
[1., 2., 3.], np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_int64(self, vals):
index = Index(vals, dtype=int)
assert isinstance(index, Int64Index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], [1., 2., 3.], np.array([1., 2., 3.]),
np.array([1, 2, 3], dtype=int), np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_float64(self, vals):
index = Index(vals, dtype=float)
assert isinstance(index, Float64Index)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
[True, False, True], np.array([True, False, True], dtype=bool)
])
def test_constructor_dtypes_to_object(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=bool)
else:
index = Index(vals)
assert isinstance(index, Index)
assert index.dtype == object
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3], dtype=int),
np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
[datetime(2011, 1, 1), datetime(2011, 1, 2)]
])
def test_constructor_dtypes_to_categorical(self, vals):
index = Index(vals, dtype='category')
assert isinstance(index, CategoricalIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])
])
def test_constructor_dtypes_to_datetime(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, DatetimeIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')]),
[timedelta(1), timedelta(1)]
])
def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, TimedeltaIndex)
@pytest.mark.parametrize("attr, utc", [
['values', False],
['asi8', True]])
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc,
klass):
# Test constructing with a datetimetz dtype
# .values produces numpy datetimes, so these are considered naive
# .asi8 produces integers, so these are considered epoch timestamps
index = pd.date_range('2011-01-01', periods=5)
arg = getattr(index, attr)
if utc:
index = index.tz_localize('UTC').tz_convert(tz_naive_fixture)
else:
index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
result = klass(arg, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(arg), tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
result = klass(list(arg), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ['values', 'asi8'])
@pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex])
def test_constructor_dtypes_timedelta(self, attr, klass):
index = pd.timedelta_range('1 days', periods=5)
dtype = index.dtype
values = getattr(index, attr)
result = klass(values, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(values), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("value", [[], iter([]), (x for x in [])])
@pytest.mark.parametrize("klass",
[Index, Float64Index, Int64Index, UInt64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex])
def test_constructor_empty(self, value, klass):
empty = klass(value)
assert isinstance(empty, klass)
assert not len(empty)
@pytest.mark.parametrize("empty,klass", [
(PeriodIndex([], freq='B'), PeriodIndex),
(PeriodIndex(iter([]), freq='B'), PeriodIndex),
(PeriodIndex((x for x in []), freq='B'), PeriodIndex),
(RangeIndex(step=1), pd.RangeIndex),
(MultiIndex(levels=[[1, 2], ['blue', 'red']],
labels=[[], []]), MultiIndex)
])
def test_constructor_empty_special(self, empty, klass):
assert isinstance(empty, klass)
assert not len(empty)
def test_constructor_nonhashable_name(self, indices):
# GH 20527
if isinstance(indices, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
name = ['0']
message = "Index.name must be a hashable type"
        # With .rename()
        tm.assert_raises_regex(TypeError, message,
                               indices.rename, name=name)
renamed = [['1']]
tm.assert_raises_regex(TypeError, message,
indices.rename, name=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message,
indices.set_names, names=renamed)
def test_constructor_overflow_int64(self):
# see gh-15832
msg = ("the elements provided in the data cannot "
"all be casted to the dtype int64")
with tm.assert_raises_regex(OverflowError, msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
for i in restricted:
ind = self.indices[i]
# with arguments
pytest.raises(TypeError, lambda: ind.view('i8'))
# these are ok
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
# with arguments
ind.view('i8')
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
assert casted.name == 'foobar'
def test_equals_object(self):
# same
assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))
@pytest.mark.parametrize("comp", [
Index(['a', 'b']), Index(['a', 'b', 'd']), ['a', 'b', 'c']])
def test_not_equals_object(self, comp):
assert not Index(['a', 'b', 'c']).equals(comp)
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
# test 0th element
tm.assert_index_equal(Index(['a', 'b', 'c', 'd']),
result.insert(0, 'a'))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(['b', 'c', 'e', 'd']),
result.insert(-1, 'e'))
# test loc +/- neq (0, -1)
tm.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))
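    # Illustrative sketch (not part of the original tests): Index.insert follows
    # Python list semantics, so a negative location inserts before the element
    # currently at that position; literals below are demonstration values.
    def _example_insert_negative_loc(self):
        result = Index(['b', 'c', 'd']).insert(-1, 'e')
        tm.assert_index_equal(result, Index(['b', 'c', 'e', 'd']))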
def test_insert_missing(self, nulls_fixture):
# GH 18295 (test missing)
expected = Index(['a', np.nan, 'b', 'c'])
result = Index(list('abc')).insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos,expected", [
(0, Index(['b', 'c', 'd'], name='index')),
(-1, Index(['a', 'b', 'c'], name='index'))
])
def test_delete(self, pos, expected):
index = Index(['a', 'b', 'c', 'd'], name='index')
result = index.delete(pos)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_delete_raises(self):
index = Index(['a', 'b', 'c', 'd'], name='index')
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
index.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
assert i1.identical(i2)
i1 = i1.rename('foo')
assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename('foo')
assert i1.identical(i2)
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
assert not i3.identical(i4)
def test_is_(self):
ind = Index(range(10))
assert ind.is_(ind)
assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
assert not ind.is_(ind[:])
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = 'bob'
assert ind.is_(ind2)
assert ind2.is_(ind)
# doesn't matter if Indices are *actually* views of underlying data,
assert not ind.is_(Index(ind.values))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
def test_asof(self):
d = self.dateIndex[0]
assert self.dateIndex.asof(d) == d
assert isna(self.dateIndex.asof(d - timedelta(1)))
d = self.dateIndex[-1]
assert self.dateIndex.asof(d + timedelta(1)) == d
d = self.dateIndex[0].to_pydatetime()
assert isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
index = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
result = index.asof('2010-02')
assert result == expected
assert not isinstance(result, Index)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
expected_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+'
'0000', 'ns')
assert first_value == x[Timestamp(expected_ts)]
@pytest.mark.parametrize("op", [
operator.eq, operator.ne, operator.gt, operator.lt,
operator.ge, operator.le
])
def test_comparators(self, op):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
def test_booleanindex(self):
boolIndex = np.repeat(True, len(self.strIndex)).astype(bool)
boolIndex[5:30:2] = False
subIndex = self.strIndex[boolIndex]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
subIndex = self.strIndex[list(boolIndex)]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, attr, dtype):
empty_arr = np.array([], dtype=dtype)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
def test_empty_fancy_raises(self, attr):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
pytest.raises(IndexError, index.__getitem__, empty_farr)
@pytest.mark.parametrize("itm", [101, 'no_int'])
def test_getitem_error(self, indices, itm):
with pytest.raises(IndexError):
indices[itm]
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first)
assert inter is first
@pytest.mark.parametrize("index2,keeps_name", [
(Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name
(Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names
(Index([3, 4, 5, 6, 7]), False)])
def test_intersection_name_preservation(self, index2, keeps_name):
index1 = Index([1, 2, 3, 4, 5], name='index')
expected = Index([3, 4, 5])
result = index1.intersection(index2)
if keeps_name:
expected.name = 'index'
assert result.name == expected.name
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("first_name,second_name,expected_name", [
('A', 'A', 'A'), ('A', 'B', None), (None, 'B', None)])
def test_intersection_name_preservation2(self, first_name, second_name,
expected_name):
first = self.strIndex[5:20]
second = self.strIndex[:10]
first.name = first_name
second.name = second_name
intersect = first.intersection(second)
assert intersect.name == expected_name
@pytest.mark.parametrize("index2,keeps_name", [
(Index([4, 7, 6, 5, 3], name='index'), True),
(Index([4, 7, 6, 5, 3], name='other'), False)])
def test_intersection_monotonic(self, index2, keeps_name):
index1 = Index([5, 3, 2, 4, 1], name='index')
expected = Index([5, 3, 4])
if keeps_name:
expected.name = "index"
result = index1.intersection(index2)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index2,expected_arr", [
(Index(['B', 'D']), ['B']),
(Index(['B', 'D', 'A']), ['A', 'B', 'A'])])
def test_intersection_non_monotonic_non_unique(self, index2, expected_arr):
# non-monotonic non-unique
index1 = Index(['A', 'B', 'A', 'C'])
expected = Index(expected_arr, dtype='object')
result = index1.intersection(index2)
tm.assert_index_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
result = i2.intersection(i1)
assert len(result) == 0
def test_union(self):
        # TODO: Replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
assert tm.equalContents(union, everything)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_union_from_iterables(self, klass):
# GH 10149
        # TODO: Replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
case = klass(second.values)
result = first.union(case)
assert tm.equalContents(result, everything)
def test_union_identity(self):
        # TODO: replace with fixture
first = self.strIndex[5:20]
union = first.union(first)
assert union is first
union = first.union([])
assert union is first
union = Index([]).union(first)
assert union is first
@pytest.mark.parametrize("first_list", [list('ab'), list()])
@pytest.mark.parametrize("second_list", [list('ab'), list()])
@pytest.mark.parametrize("first_name, second_name, expected_name", [
('A', 'B', None), (None, 'B', 'B'), ('A', None, 'A')])
def test_union_name_preservation(self, first_list, second_list, first_name,
second_name, expected_name):
first = Index(first_list, name=first_name)
second = Index(second_list, name=second_name)
union = first.union(second)
vals = sorted(set(first_list).union(second_list))
expected = Index(vals, name=expected_name)
tm.assert_index_equal(union, expected)
def test_union_dt_as_obj(self):
        # TODO: Replace with fixture
with tm.assert_produces_warning(RuntimeWarning):
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
assert tm.equalContents(firstCat, appended)
assert tm.equalContents(secondCat, self.strIndex)
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_add(self):
index = self.strIndex
expected = Index(self.strIndex.values * 2)
tm.assert_index_equal(index + index, expected)
tm.assert_index_equal(index + index.tolist(), expected)
tm.assert_index_equal(index.tolist() + index, expected)
# test add and radd
index = Index(list('abc'))
expected = Index(['a1', 'b1', 'c1'])
tm.assert_index_equal(index + '1', expected)
expected = Index(['1a', '1b', '1c'])
tm.assert_index_equal('1' + index, expected)
def test_sub(self):
index = self.strIndex
pytest.raises(TypeError, lambda: index - 'a')
pytest.raises(TypeError, lambda: index - index)
pytest.raises(TypeError, lambda: index - index.tolist())
pytest.raises(TypeError, lambda: index.tolist() - index)
def test_map_identity_mapping(self):
# GH 12766
# TODO: replace with fixture
for name, cur_index in self.indices.items():
tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
# Test that returning a single tuple from an Index
# returns an Index.
index = tm.makeIntIndex(3)
result = tm.makeIntIndex(3).map(lambda x: (x,))
expected = Index([(i,) for i in index])
tm.assert_index_equal(result, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
result = index.map(lambda x: (x, x == 1))
expected = MultiIndex.from_tuples([(i, i == 1) for i in index])
tm.assert_index_equal(result, expected)
def test_map_with_tuples_mi(self):
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ['foo', 'bar', 'baz']
multi_index = MultiIndex.from_tuples(lzip(first_level, [1, 2, 3]))
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
@pytest.mark.parametrize("attr", [
'makeDateIndex', 'makePeriodIndex', 'makeTimedeltaIndex'])
def test_map_tseries_indices_return_index(self, attr):
index = getattr(tm, attr)(10)
expected = Index([1] * 10)
result = index.map(lambda x: 1)
tm.assert_index_equal(expected, result)
def test_map_tseries_indices_accsr_return_index(self):
date_index = tm.makeDateIndex(24, freq='h', name='hourly')
expected = Index(range(24), name='hourly')
tm.assert_index_equal(expected, date_index.map(lambda x: x.hour))
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index)])
def test_map_dictlike(self, mapper):
# GH 12756
expected = Index(['foo', 'bar', 'baz'])
index = tm.makeIntIndex(3)
result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
# TODO: replace with fixture
for name in self.indices.keys():
if name == 'catIndex':
# Tested in test_categorical
continue
elif name == 'repeats':
# Cannot map duplicated index
continue
index = self.indices[name]
expected = Index(np.arange(len(index), 0, -1))
# to match proper result coercion for uints
if name == 'empty':
expected = Index([])
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("mapper", [
Series(['foo', 2., 'baz'], index=[0, 2, -1]),
{0: 'foo', 2: 2.0, -1: 'baz'}])
def test_map_with_non_function_missing_values(self, mapper):
# GH 12756
expected = Index([2., np.nan, 'foo'])
result = Index([2, 1, 0]).map(mapper)
tm.assert_index_equal(expected, result)
def test_map_na_exclusion(self):
index = Index([1.5, np.nan, 3, np.nan, 5])
result = index.map(lambda x: x * 2, na_action='ignore')
expected = index * 2
tm.assert_index_equal(result, expected)
def test_map_defaultdict(self):
index = Index([1, 2, 3])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
result = index.map(default_dict)
expected = Index(['stuff', 'blank', 'blank'])
tm.assert_index_equal(result, expected)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("name,expected", [
('foo', 'foo'), ('bar', None)])
def test_append_empty_preserve_name(self, name, expected):
left = Index([], name='foo')
right = Index([1, 2, 3], name=name)
result = left.append(right)
assert result.name == expected
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
assert 'a' not in index2
assert 'afoo' in index2
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
assert 'a' in index
index += '_x'
assert 'a_x' in index
@pytest.mark.parametrize("second_name,expected", [
(None, None), ('name', 'name')])
def test_difference_name_preservation(self, second_name, expected):
        # TODO: replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
second.name = second_name
result = first.difference(second)
assert tm.equalContents(result, answer)
if expected is None:
assert result.name is None
else:
assert result.name == expected
def test_difference_empty_arg(self):
first = self.strIndex[5:20]
        first.name = 'name'
result = first.difference([])
assert tm.equalContents(result, first)
assert result.name == first.name
def test_difference_identity(self):
first = self.strIndex[5:20]
        first.name = 'name'
result = first.difference(first)
assert len(result) == 0
assert result.name == first.name
def test_symmetric_difference(self):
# smoke
index1 = Index([1, 2, 3, 4], name='index1')
index2 = Index([2, 3, 4, 5])
result = index1.symmetric_difference(index2)
expected = Index([1, 5])
assert tm.equalContents(result, expected)
assert result.name is None
# __xor__ syntax
expected = index1 ^ index2
assert tm.equalContents(result, expected)
assert result.name is None
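    # Illustrative sketch (not part of the original tests): symmetric_difference
    # keeps the labels present in exactly one of the two indexes; literals below
    # are demonstration values.
    def _example_symmetric_difference(self):
        result = Index([1, 2, 3]).symmetric_difference(Index([2, 3, 4]))
        assert tm.equalContents(result, Index([1, 4]))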
def test_symmetric_difference_mi(self):
index1 = MultiIndex.from_tuples(self.tuples)
index2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = index1.symmetric_difference(index2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
assert tm.equalContents(result, expected)
@pytest.mark.parametrize("index2,expected", [
(Index([0, 1, np.nan]), Index([0.0, 2.0, 3.0])),
(Index([0, 1]), Index([0.0, 2.0, 3.0, np.nan]))])
def test_symmetric_difference_missing(self, index2, expected):
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
index1 = Index([1, np.nan, 2, 3])
result = index1.symmetric_difference(index2)
tm.assert_index_equal(result, expected)
def test_symmetric_difference_non_index(self):
index1 = Index([1, 2, 3, 4], name='index1')
index2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = index1.symmetric_difference(index2)
assert tm.equalContents(result, expected)
assert result.name == 'index1'
result = index1.symmetric_difference(index2, result_name='new_name')
assert tm.equalContents(result, expected)
assert result.name == 'new_name'
def test_difference_type(self):
# GH 20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
result = index.difference(index)
expected = index.drop(index)
tm.assert_index_equal(result, expected)
def test_intersection_difference(self):
# GH 20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
inter = index.intersection(index.drop(index))
diff = index.difference(index)
tm.assert_index_equal(inter, diff)
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', True), ('dateIndex', False), ('floatIndex', True)])
def test_is_numeric(self, attr, expected):
assert getattr(self, attr).is_numeric() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', True), ('boolIndex', True), ('catIndex', False),
('intIndex', False), ('dateIndex', False), ('floatIndex', False)])
def test_is_object(self, attr, expected):
assert getattr(self, attr).is_object() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', False), ('dateIndex', True), ('floatIndex', False)])
def test_is_all_dates(self, attr, expected):
assert getattr(self, attr).is_all_dates == expected
def test_summary(self):
self._check_method_works(Index._summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind._summary()
# shouldn't be formatted accidentally.
assert '~:{range}:0' in result
assert '{other}%s' in result
# GH18217
def test_summary_deprecated(self):
ind = Index(['{other}%s', "~:{range}:0"], name='A')
with tm.assert_produces_warning(FutureWarning):
ind.summary()
def test_format(self):
self._check_method_works(Index.format)
# GH 14626
        # Windows has a different precision on datetime.datetime.now (it does
        # not include microseconds). Since the default repr for Timestamp shows
        # these but Index formatting does not, we skip in that case.
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
assert formatted == expected
self.strIndex[:0].format()
@pytest.mark.parametrize("vals", [
[1, 2.0 + 3.0j, 4.], ['a', 'b', 'c']])
def test_format_missing(self, vals, nulls_fixture):
# 2845
vals = list(vals) # Copy for each iteration
vals.append(nulls_fixture)
index = Index(vals)
formatted = index.format()
expected = [str(index[0]), str(index[1]), str(index[2]), u('NaN')]
assert formatted == expected
assert index[3] is nulls_fixture
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
assert formatted[0] == 'something'
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
assert len(result) == 2
assert result == expected
@pytest.mark.parametrize("op", ['any', 'all'])
def test_logical_compat(self, op):
index = self.create_index()
assert getattr(index, op)() == getattr(index.values, op)()
def _check_method_works(self, method):
# TODO: make this a dedicated test with parametrized methods
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
method(self.catIndex)
def test_get_indexer(self):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
r1 = index1.get_indexer(index2)
e1 = np.array([1, 3, -1], dtype=np.intp)
assert_almost_equal(r1, e1)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("expected,method", [
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'pad'),
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'ffill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'backfill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'bfill')])
def test_get_indexer_methods(self, reverse, expected, method):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
if reverse:
index1 = index1[::-1]
expected = expected[::-1]
result = index2.get_indexer(index1, method=method)
assert_almost_equal(result, expected)
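    # Illustrative sketch (not part of the original tests): with method='pad'
    # each target maps to the position of the last label <= target, while
    # 'backfill' maps to the first label >= target and -1 marks "no match";
    # literals below are demonstration values.
    def _example_get_indexer_fill_methods(self):
        index = Index([10, 20, 30])
        assert list(index.get_indexer([5, 21], method='pad')) == [-1, 1]
        assert list(index.get_indexer([5, 21], method='backfill')) == [0, 2]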
def test_get_indexer_invalid(self):
# GH10411
index = Index(np.arange(10))
with tm.assert_raises_regex(ValueError, 'tolerance argument'):
index.get_indexer([1, 0], tolerance=1)
with tm.assert_raises_regex(ValueError, 'limit argument'):
index.get_indexer([1, 0], limit=1)
@pytest.mark.parametrize(
'method, tolerance, indexer, expected',
[
('pad', None, [0, 5, 9], [0, 5, 9]),
('backfill', None, [0, 5, 9], [0, 5, 9]),
('nearest', None, [0, 5, 9], [0, 5, 9]),
('pad', 0, [0, 5, 9], [0, 5, 9]),
('backfill', 0, [0, 5, 9], [0, 5, 9]),
('nearest', 0, [0, 5, 9], [0, 5, 9]),
('pad', None, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', None, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', None, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 1, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', 1, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', 1, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),
('backfill', 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
('nearest', 0.2, [0.2, 1.8, 8.5], [0, 2, -1])])
def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
index = Index(np.arange(10))
actual = index.get_indexer(indexer, method=method, tolerance=tolerance)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
@pytest.mark.parametrize('listtype', [list, tuple, Series, np.array])
@pytest.mark.parametrize(
'tolerance, expected',
list(zip([[0.3, 0.3, 0.1], [0.2, 0.1, 0.1],
[0.1, 0.5, 0.5]],
[[0, 2, -1], [0, -1, -1],
[-1, 2, 9]])))
def test_get_indexer_nearest_listlike_tolerance(self, tolerance,
expected, listtype):
index = Index(np.arange(10))
actual = index.get_indexer([0.2, 1.8, 8.5], method='nearest',
tolerance=listtype(tolerance))
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_nearest_error(self):
index = Index(np.arange(10))
with tm.assert_raises_regex(ValueError, 'limit argument'):
index.get_indexer([1, 0], method='nearest', limit=1)
with pytest.raises(ValueError, match='tolerance size must match'):
index.get_indexer([1, 0], method='nearest',
tolerance=[1, 2, 3])
@pytest.mark.parametrize("method,expected", [
('pad', [8, 7, 0]), ('backfill', [9, 8, 1]), ('nearest', [9, 7, 0])])
def test_get_indexer_nearest_decreasing(self, method, expected):
index = Index(np.arange(10))[::-1]
actual = index.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp))
actual = index.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize("method,expected", [
('pad', np.array([-1, 0, 1, 1], dtype=np.intp)),
('backfill', np.array([0, 0, 1, -1], dtype=np.intp))])
def test_get_indexer_strings(self, method, expected):
index = pd.Index(['b', 'c'])
actual = index.get_indexer(['a', 'b', 'c', 'd'], method=method)
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_strings_raises(self):
index = pd.Index(['b', 'c'])
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad',
tolerance=[2, 2, 2, 2])
def test_get_indexer_numeric_index_boolean_target(self):
# GH 16877
numeric_index = pd.Index(range(4))
result = numeric_index.get_indexer([True, False, True])
expected = np.array([-1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc(self, method):
index = pd.Index([0, 1, 2])
assert index.get_loc(1, method=method) == 1
if method:
assert index.get_loc(1, method=method, tolerance=0) == 1
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc_raises_bad_label(self, method):
index = pd.Index([0, 1, 2])
if method:
# Messages vary across versions
if PY36:
msg = 'not supported between'
elif PY35:
msg = 'unorderable types'
else:
if method == 'nearest':
msg = 'unsupported operand'
else:
msg = 'requires scalar valued input'
else:
msg = 'invalid key'
with tm.assert_raises_regex(TypeError, msg):
index.get_loc([1, 2], method=method)
@pytest.mark.parametrize("method,loc", [
('pad', 1), ('backfill', 2), ('nearest', 1)])
def test_get_loc_tolerance(self, method, loc):
index = pd.Index([0, 1, 2])
assert index.get_loc(1.1, method) == loc
assert index.get_loc(1.1, method, tolerance=1) == loc
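    # Illustrative sketch (not part of the original tests): tolerance bounds how
    # far an inexact match may sit from the requested label when a fill method
    # is used; literals below are demonstration values.
    def _example_get_loc_tolerance(self):
        index = pd.Index([0, 1, 2])
        assert index.get_loc(1.1, method='nearest', tolerance=0.2) == 1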
@pytest.mark.parametrize("method", ['pad', 'backfill', 'nearest'])
def test_get_loc_outside_tolerance_raises(self, method):
index = pd.Index([0, 1, 2])
with tm.assert_raises_regex(KeyError, '1.1'):
index.get_loc(1.1, method, tolerance=0.05)
def test_get_loc_bad_tolerance_raises(self):
index = pd.Index([0, 1, 2])
with tm.assert_raises_regex(ValueError, 'must be numeric'):
index.get_loc(1.1, 'nearest', tolerance='invalid')
def test_get_loc_tolerance_no_method_raises(self):
index = pd.Index([0, 1, 2])
with tm.assert_raises_regex(ValueError, 'tolerance .* valid if'):
index.get_loc(1.1, tolerance=1)
def test_get_loc_raises_missized_tolerance(self):
index = pd.Index([0, 1, 2])
with tm.assert_raises_regex(ValueError, 'tolerance size must match'):
index.get_loc(1.1, 'nearest', tolerance=[1, 1])
def test_get_loc_raises_object_nearest(self):
index = pd.Index(['a', 'c'])
with tm.assert_raises_regex(TypeError, 'unsupported operand type'):
index.get_loc('a', method='nearest')
def test_get_loc_raises_object_tolerance(self):
index = pd.Index(['a', 'c'])
with tm.assert_raises_regex(TypeError, 'unsupported operand type'):
index.get_loc('a', method='pad', tolerance='invalid')
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(start=2) == (2, n)
assert index.slice_locs(start=3) == (3, n)
assert index.slice_locs(3, 8) == (3, 6)
assert index.slice_locs(5, 10) == (3, n)
assert index.slice_locs(end=8) == (0, 6)
assert index.slice_locs(end=9) == (0, 7)
# reversed
index2 = index[::-1]
assert index2.slice_locs(8, 2) == (2, 6)
assert index2.slice_locs(7, 3) == (2, 5)
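    # Illustrative sketch (not part of the original tests): slice_locs returns
    # the positional (start, stop) pair for a label-based slice, with the stop
    # label included; literals below are demonstration values.
    def _example_slice_locs(self):
        index = Index(list('abcdef'))
        assert index.slice_locs('b', 'd') == (1, 4)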
def test_slice_float_locs(self):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))
n = len(index)
assert index.slice_locs(5.0, 10.0) == (3, n)
assert index.slice_locs(4.5, 10.5) == (3, 8)
index2 = index[::-1]
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
@pytest.mark.xfail(reason="Assertions were not correct - see GH 20915")
def test_slice_ints_with_floats_raises(self):
# int slicing with floats
# GH 4892, these are all TypeErrors
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))
n = len(index)
pytest.raises(TypeError,
lambda: index.slice_locs(5.0, 10.0))
pytest.raises(TypeError,
lambda: index.slice_locs(4.5, 10.5))
index2 = index[::-1]
pytest.raises(TypeError,
lambda: index2.slice_locs(8.5, 1.5), (2, 6))
pytest.raises(TypeError,
lambda: index2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
index = Index(['a', 'a', 'b', 'c', 'd', 'd'])
assert index.slice_locs('a', 'd') == (0, 6)
assert index.slice_locs(end='d') == (0, 6)
assert index.slice_locs('a', 'c') == (0, 4)
assert index.slice_locs('b', 'd') == (2, 6)
index2 = index[::-1]
assert index2.slice_locs('d', 'a') == (0, 6)
assert index2.slice_locs(end='a') == (0, 6)
assert index2.slice_locs('d', 'b') == (0, 4)
assert index2.slice_locs('c', 'a') == (2, 6)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs_dup_numeric(self, dtype):
index = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert index.slice_locs(12, 12) == (1, 3)
assert index.slice_locs(11, 13) == (1, 3)
index2 = index[::-1]
assert index2.slice_locs(12, 12) == (1, 3)
assert index2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
index = Index([np.nan, 1, 2])
assert index.slice_locs(1) == (1, 3)
assert index.slice_locs(np.nan) == (0, 3)
index = Index([0, np.nan, np.nan, 1, 2])
assert index.slice_locs(np.nan) == (1, 5)
def test_slice_locs_na_raises(self):
index = Index([np.nan, 1, 2])
with tm.assert_raises_regex(KeyError, ''):
index.slice_locs(start=1.5)
with tm.assert_raises_regex(KeyError, ''):
index.slice_locs(end=1.5)
@pytest.mark.parametrize("in_slice,expected", [
(pd.IndexSlice[::-1], 'yxdcb'), (pd.IndexSlice['b':'y':-1], ''),
(pd.IndexSlice['b'::-1], 'b'), (pd.IndexSlice[:'b':-1], 'yxdcb'),
(pd.IndexSlice[:'y':-1], 'y'), (pd.IndexSlice['y'::-1], 'yxdcb'),
(pd.IndexSlice['y'::-4], 'yb'),
# absent labels
(pd.IndexSlice[:'a':-1], 'yxdcb'), (pd.IndexSlice[:'a':-2], 'ydb'),
(pd.IndexSlice['z'::-1], 'yxdcb'), (pd.IndexSlice['z'::-3], 'yc'),
(pd.IndexSlice['m'::-1], 'dcb'), (pd.IndexSlice[:'m':-1], 'yx'),
(pd.IndexSlice['a':'a':-1], ''), (pd.IndexSlice['z':'z':-1], ''),
(pd.IndexSlice['m':'m':-1], '')
])
def test_slice_locs_negative_step(self, in_slice, expected):
index = Index(list('bcdxy'))
s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop,
in_slice.step)
result = index[s_start:s_stop:in_slice.step]
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
def test_drop_by_str_label(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
dropped = self.strIndex.drop(drop)
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("keys", [['foo', 'bar'], ['1', 'bar']])
def test_drop_by_str_label_raises_missing_keys(self, keys):
with tm.assert_raises_regex(KeyError, ''):
self.strIndex.drop(keys)
def test_drop_by_str_label_errors_ignore(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
# errors='ignore'
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
mixed = drop.tolist() + ['foo']
dropped = self.strIndex.drop(mixed, errors='ignore')
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')
expected = self.strIndex[lrange(n)]
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_loc(self):
# TODO: Parametrize numeric and str tests after self.strIndex fixture
index = Index([1, 2, 3])
dropped = index.drop(1)
expected = Index([2, 3])
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_raises_missing_keys(self):
index = Index([1, 2, 3])
with tm.assert_raises_regex(KeyError, ''):
index.drop([3, 4])
@pytest.mark.parametrize("key,expected", [
(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))])
def test_drop_by_numeric_label_errors_ignore(self, key, expected):
index = Index([1, 2, 3])
dropped = index.drop(key, errors='ignore')
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("values", [['a', 'b', ('c', 'd')],
['a', ('c', 'd'), 'b'],
[('c', 'd'), 'a', 'b']])
@pytest.mark.parametrize("to_drop", [[('c', 'd'), 'a'], ['a', ('c', 'd')]])
def test_drop_tuple(self, values, to_drop):
# GH 18304
index = pd.Index(values)
expected = pd.Index(['b'])
result = index.drop(to_drop)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[0])
for drop_me in to_drop[1], [to_drop[1]]:
result = removed.drop(drop_me)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[1])
for drop_me in to_drop[1], [to_drop[1]]:
pytest.raises(KeyError, removed.drop, drop_me)
@pytest.mark.parametrize("method,expected", [
('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])),
('union', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'),
(2, 'C')], dtype=[('num', int), ('let', 'a1')]))
])
def test_tuple_union_bug(self, method, expected):
index1 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')]))
index2 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')]))
result = getattr(index1, method)(index2)
assert result.ndim == 1
expected = Index(expected)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("attr", [
'is_monotonic_increasing', 'is_monotonic_decreasing',
'_is_strictly_monotonic_increasing',
'_is_strictly_monotonic_decreasing'])
def test_is_monotonic_incomparable(self, attr):
index = Index([5, datetime.now(), 7])
assert not getattr(index, attr)
def test_get_set_value(self):
# TODO: Remove function? GH 19728
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
self.dateIndex.set_value(values, date, 10)
assert values[67] == 10
@pytest.mark.parametrize("values", [
['foo', 'bar', 'quux'], {'foo', 'bar', 'quux'}])
@pytest.mark.parametrize("index,expected", [
(Index(['qux', 'baz', 'foo', 'bar']),
np.array([False, False, True, True])),
(Index([]), np.array([], dtype=bool)) # empty
])
def test_isin(self, values, index, expected):
result = index.isin(values)
tm.assert_numpy_array_equal(result, expected)
def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
# Test cartesian product of null fixtures and ensure that we don't
# mangle the various types (save a corner case with PyPy)
if PYPY and nulls_fixture is np.nan: # np.nan is float('nan') on PyPy
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[float('nan')]), np.array([False, True]))
elif nulls_fixture is nulls_fixture2: # should preserve NA type
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, True]))
else:
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, False]))
def test_isin_nan_common_float64(self, nulls_fixture):
if nulls_fixture is pd.NaT:
pytest.skip("pd.NaT not compatible with Float64Index")
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[np.nan]), np.array([False, True]))
# we cannot compare NaT with NaN
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[pd.NaT]), np.array([False, False]))
@pytest.mark.parametrize("level", [0, -1])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg(self, level, index):
values = index.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, index.isin(values, level=level))
index.name = 'foobar'
tm.assert_numpy_array_equal(expected,
index.isin(values, level='foobar'))
@pytest.mark.parametrize("level", [1, 10, -2])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_bad_index(self, level, index):
with tm.assert_raises_regex(IndexError, 'Too many levels'):
index.isin([], level=level)
@pytest.mark.parametrize("level", [1.0, 'foobar', 'xyzzy', np.nan])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_key(self, level, index):
with tm.assert_raises_regex(KeyError, 'must be same as name'):
index.isin([], level=level)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
index = Index(["a", "b"])
expected = np.array([False, False])
result = index.isin(empty)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("values", [
[1, 2, 3, 4],
[1., 2., 3., 4.],
[True, True, True, True],
["foo", "bar", "baz", "qux"],
pd.date_range('2018-01-01', freq='D', periods=4)])
def test_boolean_cmp(self, values):
index = Index(values)
result = (index == values)
expected = np.array([True, True, True, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("name,level", [
(None, 0), ('a', 'a')])
def test_get_level_values(self, name, level):
expected = self.strIndex.copy()
if name:
expected.name = name
result = expected.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_slice_keep_name(self):
index = Index(['a', 'b'], name='asdf')
assert index.name == index[1:].name
# instance attributes of the form self.<name>Index
@pytest.mark.parametrize('index_kind',
['unicode', 'str', 'date', 'int', 'float'])
def test_join_self(self, join_type, index_kind):
res = getattr(self, '{0}Index'.format(index_kind))
joined = res.join(res, how=join_type)
assert res is joined
@pytest.mark.parametrize("method", ['strip', 'rstrip', 'lstrip'])
def test_str_attribute(self, method):
# GH9068
index = Index([' jack', 'jill ', ' jesse ', 'frank'])
expected = Index([getattr(str, method)(x) for x in index.values])
result = getattr(index.str, method)()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
Index(range(5)), tm.makeDateIndex(10),
MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),
PeriodIndex(start='2000', end='2010', freq='A')])
def test_str_attribute_raises(self, index):
with tm.assert_raises_regex(AttributeError, 'only use .str accessor'):
index.str.repeat(2)
@pytest.mark.parametrize("expand,expected", [
(None, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(False, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(True, MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),
('f', np.nan, np.nan)]))])
def test_str_split(self, expand, expected):
index = Index(['a b c', 'd e', 'f'])
if expand is not None:
result = index.str.split(expand=expand)
else:
result = index.str.split()
tm.assert_index_equal(result, expected)
def test_str_bool_return(self):
# test boolean case, should return np.array instead of boolean Index
index = Index(['a1', 'a2', 'b1', 'b2'])
result = index.str.startswith('a')
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, np.ndarray)
def test_str_bool_series_indexing(self):
index = Index(['a1', 'a2', 'b1', 'b2'])
s = Series(range(4), index=index)
result = s[s.index.str.startswith('a')]
expected = Series(range(2), index=['a1', 'a2'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index,expected", [
(Index(list('abcd')), True), (Index(range(4)), False)])
def test_tab_completion(self, index, expected):
# GH 9910
result = 'str' in dir(index)
assert result == expected
def test_indexing_doesnt_change_class(self):
index = Index([1, 2, 3, 'a', 'b', 'c'])
assert index[1:3].identical(pd.Index([2, 3], dtype=np.object_))
assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_index = Index(np.random.permutation(15))
right_index = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
result = left_index.join(right_index, how='outer')
# right_index in this case because DatetimeIndex has join precedence
# over Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_index.astype(object).union(
left_index.astype(object))
tm.assert_index_equal(result, expected)
def test_nan_first_take_datetime(self):
index = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
result = index.take([-1, 0, 1])
expected = Index([index[-1], index[0], index[1]])
tm.assert_index_equal(result, expected)
def test_take_fill_value(self):
# GH 12631
index = pd.Index(list('ABC'), name='xxx')
result = index.take(np.array([1, 0, -1]))
expected = pd.Index(list('BAC'), name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = index.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(['B', 'A', np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = index.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Index(['B', 'A', 'C'], name='xxx')
tm.assert_index_equal(result, expected)
def test_take_fill_value_none_raises(self):
index = pd.Index(list('ABC'), name='xxx')
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
index.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
index.take(np.array([1, 0, -5]), fill_value=True)
def test_take_bad_bounds_raises(self):
index = pd.Index(list('ABC'), name='xxx')
with tm.assert_raises_regex(IndexError, 'out of bounds'):
index.take(np.array([1, -5]))
@pytest.mark.parametrize("name", [None, 'foobar'])
@pytest.mark.parametrize("labels", [
[], np.array([]), ['A', 'B', 'C'], ['C', 'B', 'A'],
np.array(['A', 'B', 'C']), np.array(['C', 'B', 'A']),
# Must preserve name even if dtype changes
pd.date_range('20130101', periods=3).values,
pd.date_range('20130101', periods=3).tolist()])
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name,
labels):
# GH6552
index = pd.Index([0, 1, 2])
index.name = name
assert index.reindex(labels)[0].name == name
@pytest.mark.parametrize("labels", [
[], np.array([]), np.array([], dtype=np.int64)])
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self,
labels):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == np.object_
@pytest.mark.parametrize("labels,dtype", [
(pd.Int64Index([]), np.int64),
(pd.Float64Index([]), np.float64),
(pd.DatetimeIndex([]), np.datetime64)])
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self,
labels,
dtype):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == dtype
def test_reindex_no_type_preserve_target_empty_mi(self):
index = pd.Index(list('abc'))
result = index.reindex(pd.MultiIndex(
[pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]
assert result.levels[0].dtype.type == np.int64
assert result.levels[1].dtype.type == np.float64
def test_groupby(self):
index = Index(range(5))
result = index.groupby(np.array([1, 1, 2, 2, 2]))
expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(result, expected)
@pytest.mark.parametrize("mi,expected", [
(MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])),
(MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False]))])
def test_equals_op_multiindex(self, mi, expected):
# GH9785
# test comparisons of multiindex
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == mi
tm.assert_numpy_array_equal(result, expected)
def test_equals_op_multiindex_identify(self):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == df.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("index", [
MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]),
Index(['foo', 'bar', 'baz'])])
def test_equals_op_mismatched_multiindex_raises(self, index):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
with tm.assert_raises_regex(ValueError, "Lengths must match"):
df.index == index
def test_equals_op_index_vs_mi_same_length(self):
mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
index = Index(['foo', 'bar', 'baz'])
result = mi == index
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dt_conv", [
pd.to_datetime, pd.to_timedelta])
def test_dt_conversion_preserves_name(self, dt_conv):
# GH 10875
index = pd.Index(['01:02:03', '01:02:04'], name='label')
assert index.name == dt_conv(index).name
@pytest.mark.skipif(not PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# ASCII
# short
(pd.Index(['a', 'bb', 'ccc']),
u"""Index(['a', 'bb', 'ccc'], dtype='object')"""),
# multiple lines
(pd.Index(['a', 'bb', 'ccc'] * 10),
u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')"""),
# truncated
(pd.Index(['a', 'bb', 'ccc'] * 100),
u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)"""),
# Non-ASCII
# short
(pd.Index([u'あ', u'いい', u'ううう']),
u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr(self, index, expected):
result = repr(index)
assert result == expected
@pytest.mark.skipif(PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# ASCII
# short
(pd.Index(['a', 'bb', 'ccc']),
u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""),
# multiple lines
(pd.Index(['a', 'bb', 'ccc'] * 10),
u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object')"""),
# truncated
(pd.Index(['a', 'bb', 'ccc'] * 100),
u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object', length=300)"""),
# Non-ASCII
# short
(pd.Index([u'あ', u'いい', u'ううう']),
u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_compat(self, index, expected):
result = unicode(index) # noqa
assert result == expected
@pytest.mark.skipif(not PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# short
(pd.Index([u'あ', u'いい', u'ううう']),
(u"Index(['あ', 'いい', 'ううう'], "
u"dtype='object')")),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう'],\n"
u" dtype='object')""")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
u"'いい', 'ううう', 'あ', 'いい',\n"
u" 'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_with_unicode_option(self, index, expected):
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
result = repr(index)
assert result == expected
@pytest.mark.skipif(PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# short
(pd.Index([u'あ', u'いい', u'ううう']),
(u"Index([u'あ', u'いい', u'ううう'], "
u"dtype='object')")),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_with_unicode_option_compat(self, index,
expected):
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
result = unicode(index) # noqa
assert result == expected
@pytest.mark.parametrize('dtype', [np.int64, np.float64])
@pytest.mark.parametrize('delta', [1, 0, -1])
def test_addsub_arithmetic(self, dtype, delta):
# GH 8142
delta = dtype(delta)
index = pd.Index([10, 11, 12], dtype=dtype)
result = index + delta
expected = pd.Index(index.values + delta, dtype=dtype)
tm.assert_index_equal(result, expected)
# this subtraction used to fail
result = index - delta
expected = pd.Index(index.values - delta, dtype=dtype)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index + index, 2 * index)
tm.assert_index_equal(index - index, 0 * index)
assert not (index - index).empty
def test_iadd_preserves_name(self):
# GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name
ser = pd.Series([1, 2, 3])
ser.index.name = 'foo'
ser.index += 1
assert ser.index.name == "foo"
ser.index -= 1
assert ser.index.name == "foo"
def test_cached_properties_not_settable(self):
index = pd.Index([1, 2, 3])
with tm.assert_raises_regex(AttributeError, "Can't set attribute"):
index.is_unique = False
def test_get_duplicates_deprecated(self):
index = pd.Index([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
index.get_duplicates()
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; idx = pd.Index([1, 2])"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('idx.', 4))
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are uncomparable in py3
# (GH 13514)
_holder = Index
def setup_method(self, method):
self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))
self.setup_indices()
def create_index(self):
return self.mixedIndex
def test_argsort(self):
index = self.create_index()
if PY36:
with tm.assert_raises_regex(TypeError, "'>|<' not supported"):
result = index.argsort()
elif PY3:
with tm.assert_raises_regex(TypeError, "unorderable types"):
result = index.argsort()
else:
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
index = self.create_index()
if PY36:
with tm.assert_raises_regex(TypeError, "'>|<' not supported"):
result = np.argsort(index)
elif PY3:
with tm.assert_raises_regex(TypeError, "unorderable types"):
result = np.argsort(index)
else:
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
index = self.create_index()
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
tm.assert_index_equal(first, second)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
warning_type = RuntimeWarning if PY3 else None
with tm.assert_produces_warning(warning_type):
# Python 3: Unorderable types
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
index = pd.Index([1, 2], name='MyName')
index1 = index.copy()
tm.assert_index_equal(index, index1)
index2 = index.copy(name='NewName')
tm.assert_index_equal(index, index2, check_names=False)
assert index.name == 'MyName'
assert index2.name == 'NewName'
index3 = index.copy(names=['NewName'])
tm.assert_index_equal(index, index3, check_names=False)
assert index.name == 'MyName'
assert index.names == ['MyName']
assert index3.name == 'NewName'
assert index3.names == ['NewName']
def test_union_base(self):
index = self.create_index()
first = index[3:]
second = index[:5]
if PY3:
# unorderable types
warn_type = RuntimeWarning
else:
warn_type = None
with tm.assert_produces_warning(warn_type):
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_union_different_type_base(self, klass):
# GH 10149
index = self.create_index()
first = index[3:]
second = index[:5]
if PY3:
# unorderable types
warn_type = RuntimeWarning
else:
warn_type = None
with tm.assert_produces_warning(warn_type):
result = first.union(klass(second.values))
assert tm.equalContents(result, index)
def test_intersection_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:5]
second = index[:3]
result = first.intersection(second)
expected = Index([0, 'a', 1])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_intersection_different_type_base(self, klass):
# GH 10149
index = self.create_index()
first = index[:5]
second = index[:3]
result = first.intersection(klass(second.values))
assert tm.equalContents(result, second)
def test_difference_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.difference(second)
expected = Index([0, 1, 'a'])
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, 'a', 'c'])
tm.assert_index_equal(result, expected)
def test_logical_compat(self):
index = self.create_index()
assert index.all() == index.values.all()
assert index.any() == index.values.any()
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("dtype", [
None, object, 'category'])
@pytest.mark.parametrize("vals,expected", [
([1, 2, 3], [1, 2, 3]), ([1., 2., 3.], [1., 2., 3.]),
([1., 2., np.nan, 3.], [1., 2., 3.]),
(['A', 'B', 'C'], ['A', 'B', 'C']),
(['A', np.nan, 'B', 'C'], ['A', 'B', 'C'])])
def test_dropna(self, how, dtype, vals, expected):
# GH 6194
index = pd.Index(vals, dtype=dtype)
result = index.dropna(how=how)
expected = pd.Index(expected, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("index,expected", [
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', pd.NaT]),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.TimedeltaIndex(['1 days', '2 days', '3 days']),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', '3 days', pd.NaT]),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')),
(pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'))])
def test_dropna_dt_like(self, how, index, expected):
result = index.dropna(how=how)
tm.assert_index_equal(result, expected)
def test_dropna_invalid_how_raises(self):
msg = "invalid how option: xxx"
with tm.assert_raises_regex(ValueError, msg):
pd.Index([1, 2, 3]).dropna(how='xxx')
def test_get_combined_index(self):
result = _get_combined_index([])
expected = Index([])
tm.assert_index_equal(result, expected)
def test_repeat(self):
repeats = 2
index = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
result = index.repeat(repeats)
tm.assert_index_equal(result, expected)
def test_repeat_warns_n_keyword(self):
index = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
with tm.assert_produces_warning(FutureWarning):
result = index.repeat(n=2)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
pd.Index([np.nan]), pd.Index([np.nan, 1]),
pd.Index([1, 2, np.nan]), pd.Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']), pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT'])])
def test_is_monotonic_na(self, index):
assert not index.is_monotonic_increasing
assert not index.is_monotonic_decreasing
assert not index._is_strictly_monotonic_increasing
assert not index._is_strictly_monotonic_decreasing
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
result = repr(pd.Index(np.arange(1000)))
assert len(result) < 200
assert "..." in result
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_int_name_format(self, klass):
index = Index(['a', 'b', 'c'], name=0)
result = klass(lrange(3), index=index)
assert '0' in repr(result)
def test_print_unicode_columns(self):
df = pd.DataFrame({u("\u05d0"): [1, 2, 3],
"\u05d1": [4, 5, 6],
"c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
@pytest.mark.parametrize("func,compat_func", [
(str, text_type), # unicode string
(bytes, str) # byte string
])
def test_with_unicode(self, func, compat_func):
index = Index(lrange(1000))
if PY3:
func(index)
else:
compat_func(index)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
index1 = Index(dt_dates, dtype=object)
index2 = Index(['aa'], dtype=object)
result = index2.intersection(index1)
expected = Index([], dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op', [operator.eq, operator.ne,
operator.gt, operator.ge,
operator.lt, operator.le])
def test_comparison_tzawareness_compat(self, op):
# GH#18162
dr = pd.date_range('2016-01-01', periods=6)
dz = dr.tz_localize('US/Pacific')
        # Check that there isn't a problem: aware-aware and naive-naive
        # comparisons do not raise
naive_series = Series(dr)
aware_series = Series(dz)
with pytest.raises(TypeError):
op(dz, naive_series)
with pytest.raises(TypeError):
op(dr, aware_series)
# TODO: implement _assert_tzawareness_compat for the reverse
# comparison with the Series on the left-hand side
class TestIndexUtils(object):
@pytest.mark.parametrize('data, names, expected', [
([[1, 2, 3]], None, Index([1, 2, 3])),
([[1, 2, 3]], ['name'], Index([1, 2, 3], name='name')),
([['a', 'a'], ['c', 'd']], None,
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]])),
([['a', 'a'], ['c', 'd']], ['L1', 'L2'],
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]],
names=['L1', 'L2'])),
])
def test_ensure_index_from_sequences(self, data, names, expected):
result = _ensure_index_from_sequences(data, names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt',
'add', 'radd', 'sub', 'rsub',
'mul', 'rmul', 'truediv', 'rtruediv',
'floordiv', 'rfloordiv',
'pow', 'rpow', 'mod', 'divmod'])
def test_generated_op_names(opname, indices):
index = indices
if isinstance(index, ABCIndex) and opname == 'rsub':
# pd.Index.__rsub__ does not exist; though the method does exist
# for subclasses. see GH#19723
return
opname = '__{name}__'.format(name=opname)
method = getattr(index, opname)
assert method.__name__ == opname
@pytest.mark.parametrize('index_maker', tm.index_subclass_makers_generator())
def test_index_subclass_constructor_wrong_kwargs(index_maker):
# GH #19348
with tm.assert_raises_regex(TypeError, 'unexpected keyword argument'):
index_maker(foo='bar')
| bsd-3-clause |
mudbungie/NetExplorer | env/share/doc/networkx-1.11/examples/drawing/giant_component.py | 15 | 2287 | #!/usr/bin/env python
"""
This example illustrates the sudden appearance of a
giant connected component in a binomial random graph.
Requires pygraphviz and matplotlib to draw.
"""
# Copyright (C) 2006-2016
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
try:
import matplotlib.pyplot as plt
except:
raise
import networkx as nx
import math
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
layout = graphviz_layout
except ImportError:
try:
import pydotplus
from networkx.drawing.nx_pydot import graphviz_layout
layout = graphviz_layout
except ImportError:
print("PyGraphviz and PyDotPlus not found;\n"
"drawing with spring layout;\n"
"will be slow.")
layout = nx.spring_layout
n=150 # 150 nodes
# p value at which giant component (of size log(n) nodes) is expected
p_giant=1.0/(n-1)
# p value at which graph is expected to become completely connected
p_conn=math.log(n)/float(n)
# the following range of p values should be close to the threshold
pvals=[0.003, 0.006, 0.008, 0.015]
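# Worked numbers for the thresholds above (n = 150):
#   p_giant = 1/149       ~= 0.0067
#   p_conn  = ln(150)/150 ~= 0.0334
# so the pvals chosen straddle the giant-component threshold while staying
# below the full-connectivity threshold.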
region=220 # for pylab 2x2 subplot layout
plt.subplots_adjust(left=0,right=1,bottom=0,top=0.95,wspace=0.01,hspace=0.01)
for p in pvals:
G=nx.binomial_graph(n,p)
pos=layout(G)
region+=1
plt.subplot(region)
plt.title("p = %6.3f"%(p))
nx.draw(G,pos,
with_labels=False,
node_size=10
)
# identify largest connected component
Gcc=sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)
G0=Gcc[0]
nx.draw_networkx_edges(G0,pos,
with_labels=False,
edge_color='r',
width=6.0
)
# show other connected components
for Gi in Gcc[1:]:
if len(Gi)>1:
nx.draw_networkx_edges(Gi,pos,
with_labels=False,
edge_color='r',
alpha=0.3,
width=5.0
)
plt.savefig("giant_component.png")
plt.show() # display
| mit |
chunweiyuan/xarray | xarray/testing.py | 1 | 6855 | """Testing functions exposed to the user API"""
from collections import OrderedDict
import numpy as np
import pandas as pd
from xarray.core import duck_array_ops
from xarray.core import formatting
from xarray.core.indexes import default_indexes
def _decode_string_data(data):
if data.dtype.kind == 'S':
return np.core.defchararray.decode(data, 'utf-8', 'replace')
return data
def _data_allclose_or_equiv(arr1, arr2, rtol=1e-05, atol=1e-08,
decode_bytes=True):
if any(arr.dtype.kind == 'S' for arr in [arr1, arr2]) and decode_bytes:
arr1 = _decode_string_data(arr1)
arr2 = _decode_string_data(arr2)
exact_dtypes = ['M', 'm', 'O', 'S', 'U']
if any(arr.dtype.kind in exact_dtypes for arr in [arr1, arr2]):
return duck_array_ops.array_equiv(arr1, arr2)
else:
return duck_array_ops.allclose_or_equiv(
arr1, arr2, rtol=rtol, atol=atol)
def assert_equal(a, b):
"""Like :py:func:`numpy.testing.assert_array_equal`, but for xarray
objects.
Raises an AssertionError if two objects are not equal. This will match
data values, dimensions and coordinates, but not names or attributes
(except for Dataset objects for which the variable names must match).
Arrays with NaN in the same location are considered equal.
Parameters
----------
a : xarray.Dataset, xarray.DataArray or xarray.Variable
The first object to compare.
b : xarray.Dataset, xarray.DataArray or xarray.Variable
The second object to compare.
See also
--------
assert_identical, assert_allclose, Dataset.equals, DataArray.equals,
numpy.testing.assert_array_equal
"""
import xarray as xr
__tracebackhide__ = True # noqa: F841
assert type(a) == type(b) # noqa
if isinstance(a, (xr.Variable, xr.DataArray)):
assert a.equals(b), formatting.diff_array_repr(a, b, 'equals')
elif isinstance(a, xr.Dataset):
assert a.equals(b), formatting.diff_dataset_repr(a, b, 'equals')
else:
raise TypeError('{} not supported by assertion comparison'
.format(type(a)))
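# Illustrative usage (a minimal sketch, not part of the library source;
# assumes ``import numpy as np`` and ``import xarray as xr`` at the call site):
#
#     a = xr.DataArray([1.0, np.nan], dims='x', name='a')
#     b = xr.DataArray([1.0, np.nan], dims='x', name='b')
#     assert_equal(a, b)      # passes: names/attrs ignored, NaNs in same spot match
#     assert_identical(a, b)  # would raise: the names differ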
def assert_identical(a, b):
"""Like :py:func:`xarray.testing.assert_equal`, but also matches the
objects' names and attributes.
Raises an AssertionError if two objects are not identical.
Parameters
----------
a : xarray.Dataset, xarray.DataArray or xarray.Variable
The first object to compare.
b : xarray.Dataset, xarray.DataArray or xarray.Variable
The second object to compare.
See also
--------
assert_equal, assert_allclose, Dataset.equals, DataArray.equals
"""
import xarray as xr
__tracebackhide__ = True # noqa: F841
assert type(a) == type(b) # noqa
if isinstance(a, xr.Variable):
assert a.identical(b), formatting.diff_array_repr(a, b, 'identical')
elif isinstance(a, xr.DataArray):
assert a.name == b.name
assert a.identical(b), formatting.diff_array_repr(a, b, 'identical')
elif isinstance(a, (xr.Dataset, xr.Variable)):
assert a.identical(b), formatting.diff_dataset_repr(a, b, 'identical')
else:
raise TypeError('{} not supported by assertion comparison'
.format(type(a)))
def assert_allclose(a, b, rtol=1e-05, atol=1e-08, decode_bytes=True):
"""Like :py:func:`numpy.testing.assert_allclose`, but for xarray objects.
Raises an AssertionError if two objects are not equal up to desired
tolerance.
Parameters
----------
a : xarray.Dataset, xarray.DataArray or xarray.Variable
The first object to compare.
b : xarray.Dataset, xarray.DataArray or xarray.Variable
The second object to compare.
rtol : float, optional
Relative tolerance.
atol : float, optional
Absolute tolerance.
decode_bytes : bool, optional
Whether byte dtypes should be decoded to strings as UTF-8 or not.
This is useful for testing serialization methods on Python 3 that
return saved strings as bytes.
See also
--------
assert_identical, assert_equal, numpy.testing.assert_allclose
"""
import xarray as xr
__tracebackhide__ = True # noqa: F841
assert type(a) == type(b) # noqa
kwargs = dict(rtol=rtol, atol=atol, decode_bytes=decode_bytes)
if isinstance(a, xr.Variable):
assert a.dims == b.dims
allclose = _data_allclose_or_equiv(a.values, b.values, **kwargs)
assert allclose, '{}\n{}'.format(a.values, b.values)
elif isinstance(a, xr.DataArray):
assert_allclose(a.variable, b.variable, **kwargs)
assert set(a.coords) == set(b.coords)
for v in a.coords.variables:
# can't recurse with this function as coord is sometimes a
# DataArray, so call into _data_allclose_or_equiv directly
allclose = _data_allclose_or_equiv(a.coords[v].values,
b.coords[v].values, **kwargs)
assert allclose, '{}\n{}'.format(a.coords[v].values,
b.coords[v].values)
elif isinstance(a, xr.Dataset):
assert set(a.data_vars) == set(b.data_vars)
assert set(a.coords) == set(b.coords)
for k in list(a.variables) + list(a.coords):
assert_allclose(a[k], b[k], **kwargs)
else:
raise TypeError('{} not supported by assertion comparison'
.format(type(a)))
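# Illustrative usage (a minimal sketch, not part of the library source):
#
#     a = xr.DataArray([1.0, 2.0], dims='x')
#     b = xr.DataArray([1.0, 2.0 + 1e-9], dims='x')
#     assert_allclose(a, b, rtol=1e-05)  # passes: equal within tolerance
#     assert_equal(a, b)                 # would raise: not exactly equal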
def _assert_indexes_invariants_checks(indexes, possible_coord_variables, dims):
import xarray as xr
assert isinstance(indexes, OrderedDict), indexes
assert all(isinstance(v, pd.Index) for v in indexes.values()), \
{k: type(v) for k, v in indexes.items()}
index_vars = {k for k, v in possible_coord_variables.items()
if isinstance(v, xr.IndexVariable)}
assert indexes.keys() <= index_vars, (set(indexes), index_vars)
# Note: when we support non-default indexes, these checks should be opt-in
# only!
defaults = default_indexes(possible_coord_variables, dims)
assert indexes.keys() == defaults.keys(), \
(set(indexes), set(defaults))
assert all(v.equals(defaults[k]) for k, v in indexes.items()), \
(indexes, defaults)
def _assert_indexes_invariants(a):
"""Separate helper function for checking indexes invariants only."""
import xarray as xr
if isinstance(a, xr.DataArray):
if a._indexes is not None:
_assert_indexes_invariants_checks(a._indexes, a._coords, a.dims)
elif isinstance(a, xr.Dataset):
if a._indexes is not None:
_assert_indexes_invariants_checks(
a._indexes, a._variables, a._dims)
elif isinstance(a, xr.Variable):
# no indexes
pass
| apache-2.0 |
helloworldajou/webserver | util/detect-outliers.py | 10 | 2768 | #!/usr/bin/env python2
#
# Detect outlier faces (not of the same person) in a directory
# of aligned images.
# Brandon Amos
# 2016/02/14
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
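#
# Example invocation (illustrative; the image directory path is hypothetical):
#
#   ./detect-outliers.py --threshold 0.9 ./aligned-images/person-1
#
# Images whose representation lies farther than --threshold from the mean
# representation of the directory are reported (and deleted with --delete).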
import time
start = time.time()
import argparse
import os
import glob
import numpy as np
np.set_printoptions(precision=2)
from sklearn.metrics.pairwise import euclidean_distances
import cv2
import openface
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
openfaceModelDir = os.path.join(modelDir, 'openface')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--threshold', type=float, default=0.9)
parser.add_argument('--delete', action='store_true', help='Delete the outliers.')
parser.add_argument('directory')
args = parser.parse_args()
net = openface.TorchNeuralNet(args.networkModel, args.imgDim, cuda=args.cuda)
reps = []
paths = sorted(list(glob.glob(os.path.join(args.directory, '*.png'))))
print("=== {} ===".format(args.directory))
for imgPath in paths:
if cv2.imread(imgPath) is None:
print("Warning: Skipping bad image file: {}".format(imgPath))
if args.delete:
# Remove the file if it's not a valid image.
os.remove(imgPath)
else:
reps.append(net.forwardPath(imgPath))
mean = np.mean(reps, axis=0)
dists = euclidean_distances(reps, mean)
outliers = []
for path, dist in zip(paths, dists):
dist = dist.take(0)
if dist > args.threshold:
outliers.append((path, dist))
print("Found {} outlier(s) from {} images.".format(len(outliers), len(paths)))
for path, dist in outliers:
print(" + {} ({:0.2f})".format(path, dist))
if args.delete:
os.remove(path)
if __name__ == '__main__':
main()
| apache-2.0 |
lodemo/CATANA | src/face_recognition/collabDetectionTest.py | 1 | 2819 | # -*- coding: utf-8 -*-
'''
Due to memory usage problems, if the features array is present as a file on disk,
it's loaded here and used for computing the sparse distance matrix.
The features array can't be loaded as a numpy memmap, as it's not a "perfect" array -> every row has a different length.
'''
from __future__ import unicode_literals
from concurrent.futures import *
import os
import sys
import time
import numpy as np
import pandas as pa
import cPickle as cp
import json
import math
from threading import Thread
from database import *
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.stats import describe
import itertools
import string
import hdbscan
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
import facedist
import facedist32
import networkx as nx
fileDir = os.path.dirname(os.path.realpath(__file__))
# Load features array from disk
features = np.load(os.path.join(fileDir,'features_30sec_fixed.npy'))
print 'Loaded feature:', features.shape
test = features[:10000]
print test.shape
start = time.time()
D = facedist.mean_dist(test)
print 'D64 sys-size:', sys.getsizeof(D)
print 'D64 np nbytes:', D.nbytes
nrow = len(test)
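# The loops below expand the condensed distance vector D into a symmetric
# square matrix: entry nn = ii + jj*(jj-1)/2 of D (assumed to hold the
# distance between samples ii and jj for ii < jj, i.e. pairs enumerated
# column-by-column over the upper triangle) is mirrored into
# dense_distances[ii, jj] and dense_distances[jj, ii].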
dense_distances = np.zeros( (nrow, nrow), dtype=np.double)
for ii in range(nrow):
for jj in range(ii+1, nrow):
nn = ii+jj*(jj-1)/2
rd = D[nn]
dense_distances[ii, jj] = rd
dense_distances[jj, ii] = rd
print 'Dense64 sys-size:', sys.getsizeof(dense_distances)
print 'Dense64 np nbytes:', dense_distances.nbytes
db = hdbscan.HDBSCAN(min_cluster_size=2, metric='precomputed', core_dist_n_jobs=-1).fit(dense_distances)
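# HDBSCAN attributes read below: labels_ gives the cluster id per sample
# (-1 means noise), probabilities_ the strength of each sample's cluster
# membership, and cluster_persistence_ a per-cluster stability score.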
labels = db.labels_
probabilities = db.probabilities_
pers = db.cluster_persistence_
funique, fcounts = np.unique(labels, return_counts=True)
processTime = time.time() - start
print 'facedist distance computation took', processTime, 'found', len(funique)
start = time.time()
D = facedist32.mean_dist(test)
print 'D32 sys-size:', sys.getsizeof(D)
print 'D32 np nbytes:', D.nbytes
nrow = len(test)
dense_distances = np.zeros( (nrow, nrow), dtype=np.float32)
for ii in range(nrow):
for jj in range(ii+1, nrow):
nn = ii+jj*(jj-1)/2
rd = D[nn]
dense_distances[ii, jj] = rd
dense_distances[jj, ii] = rd
print 'Dense32 sys-size:', sys.getsizeof(dense_distances)
print 'Dense32 np nbytes:', dense_distances.nbytes
db = hdbscan.HDBSCAN(min_cluster_size=2, metric='precomputed', core_dist_n_jobs=-1).fit(dense_distances)
labels = db.labels_
probabilities = db.probabilities_
pers = db.cluster_persistence_
eunique, ecounts = np.unique(labels, return_counts=True)
processTime = time.time() - start
print 'facedist32 distance computation took', processTime, 'found', len(eunique)
print fcounts
print ecounts
| mit |
kmfarley11/8Bitify | source/matrix_ops.py | 1 | 4131 | #!/usr/bin/python3
### matrix_ops.py
#
# Description: functions included allow for various (audio) data operations
# (i.e. eight_bitify, superimpose_square)
#
# Requires: numpy, scipy
# (sudo apt-get install python3-numpy python3-scipy)
#
# Tests available: python3 -m doctest matrix_ops.py
#
###
import numpy as np # matrix ops via numpy: audio data of shape (num_data, num_channels)
from scipy import signal
# global vars for convenience / testing
audioFileDir = "../Test Files/"
testOrchFile = "383929__oymaldonado__uplifting-orchestra.wav"
testGuitarFile = "ThuMar2302_40_45UTC2017.wav"
testWavFileOut = "testWavOut.wav"
def eight_bitify(data, bits=8):
"""
takes numpy matrix of audio data and desired bit resolution, returns modded numpy data
essentially just mods the resolution of the data, defaults to 8 bit (255 values)
>>> t = np.array([1, 2, -5, 4, 5, 6, 120, 355, -500])
>>> t
array([ 1, 2, -5, 4, 5, 6, 120, 355, -500])
>>> t.shape
(9,)
>>> result = eight_bitify(t)
>>> result.shape
(9,)
>>> result
array([ 0, 0, -7, 2, 2, 2, 116, 355, -500])
"""
# get max / min, then set discrete points for resolution depicted
moddedData = np.array(data)
hi = np.max(data)
lo = np.min(data)
bins = np.linspace(lo, hi, num=(2**bits))
# discretize the data into our new resolution set of points (less noticeable for small data...)
bidxs = np.digitize(moddedData, bins, right=False)
moddedData = bins[bidxs-1]
# return discretized data
return moddedData.astype(str(data.dtype))
def create_square(rate, sampleLength, frequency=100, amplitude=0.05):
"""
Create and return a NumPy square wave with the given parameters.
Rate is the sample rate, in samples per second. sampleLength is num samples in data
Therefore, create (rate/frequency) samples of amplitude before switching polarity.
>>> create_square(500, 10, frequency=500, amplitude=1)
array([ 1., -1., -1., 1., 1., -1., -1., 1., 1., -1.])
"""
totalTime = sampleLength * rate
t = np.linspace(0, totalTime, sampleLength) # this creates t indicating seconds with the same dimensions as incoming data
square = signal.square(2 * np.pi * frequency * t)
square *= amplitude
return square
def superimpose(wave1, wave2):
"""
Superimpose two numpy arrays.
>>> a = np.array([1,1,1])
>>> b = np.array([2,2,2])
>>> superimpose(a,b)
array([3, 3, 3])
>>> c = np.array([1])
>>> d = np.array([2,2,2])
>>> superimpose(d,c)
array([3, 2, 2])
"""
assert type(wave1) == np.ndarray
assert type(wave2) == np.ndarray
w1type = str(wave1.dtype)
w2type = str(wave2.dtype)
if len(wave1) > len(wave2):
zeros = np.zeros(len(wave1) - len(wave2))
wave2 = np.concatenate((wave2, zeros))
elif len(wave2) > len(wave1):
zeros = np.zeros(len(wave2) - len(wave1))
wave1 = np.concatenate((wave1, zeros))
return wave1.astype(w1type) + wave2.astype(w2type)
convolve = signal.convolve
'''
def convolve(wave1, wave2, mode='full', method='auto'):
"""
convolute one wave on top of another
needs doctests and verification for what we are trying to do with it
"""
return signal.convolve(wave1, wave2, mode, method).astype(str(wave1.dtype))
'''
def split_channel(data):
    """Split stereo audio of shape (num_samples, 2) into (left, right) 1-D arrays."""
    rows, cols = data.shape
    return data[:,0], data[:,1]
def make_mono(data):
"""
given audio data matrix with any # channels, compress to 1 channel and return that data
>>> a = np.array([[1, 2, 3], [4, 5, 6], [0, 0, 0]])
>>> make_mono(a)
array([2, 5, 0])
>>> b = np.array([[0, 1], [-1, 3], [0, 0]])
>>> make_mono(b)
array([0, 1, 0])
"""
return (data.sum(axis=1)/data.shape[-1]).astype(str(data.dtype))
def plot(t, array):
import matplotlib.pyplot as plt
plt.plot(t, array)
plt.show()
| gpl-3.0 |
IshankGulati/scikit-learn | sklearn/utils/fixes.py | 14 | 13240 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
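# e.g. _parse_version('0.17.1') -> (0, 17, 1); development builds keep the
# non-numeric part as a string:
# _parse_version('0.15.0.dev-1ea1592') -> (0, 15, 0, 'dev-1ea1592')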
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
# Supported since numpy 1.7.0
if 'order' in signature(np.copy).parameters:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float64))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument (numpy < 1.7.0)
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from numpy import partition
except ImportError:
    warnings.warn('Using `sort` instead of partition. '
                  'Upgrade numpy to 1.8 for better performance on large '
                  'numbers of clusters.')
def partition(a, kth, axis=-1, kind='introselect', order=None):
return np.sort(a, axis=axis, order=order)
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
def parallel_helper(obj, methodname, *args, **kwargs):
"""Helper to workaround Python 2 limitations of pickling instance methods"""
return getattr(obj, methodname)(*args, **kwargs)
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in signature(os.makedirs).parameters:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
if np_version < (1, 8, 1):
def array_equal(a1, a2):
# copy-paste from numpy 1.8.1
try:
a1, a2 = np.asarray(a1), np.asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(np.asarray(a1 == a2).all())
else:
from numpy import array_equal
if sp_version < (0, 13, 0):
def rankdata(a, method='average'):
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
else:
from scipy.stats import rankdata
if np_version < (1, 12):
class MaskedArray(np.ma.MaskedArray):
# Before numpy 1.12, np.ma.MaskedArray object is not picklable
# This fix is needed to make our model_selection.GridSearchCV
# picklable as the ``cv_results_`` param uses MaskedArray
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
data_state = super(np.ma.MaskedArray, self).__reduce__()[2]
return data_state + (np.ma.getmaskarray(self).tostring(cf),
self._fill_value)
else:
from numpy.ma import MaskedArray # noqa
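# As the comment above states, the extra __getstate__ is what allows a
# cv_results_-style MaskedArray to survive pickle.dumps/pickle.loads on
# numpy < 1.12; newer numpy pickles np.ma.MaskedArray out of the box.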
if 'axis' not in signature(np.linalg.norm).parameters:
def norm(X, ord=None, axis=None):
"""
Handles the axis parameter for the norm function
in old versions of numpy (useless for numpy >= 1.8).
"""
if axis is None or X.ndim == 1:
result = np.linalg.norm(X, ord=ord)
return result
if axis not in (0, 1):
raise NotImplementedError("""
The fix that adds axis parameter to the old numpy
norm only works for 1D or 2D arrays.
""")
if axis == 0:
X = X.T
result = np.zeros(X.shape[0])
for i in range(len(result)):
result[i] = np.linalg.norm(X[i], ord=ord)
return result
else:
norm = np.linalg.norm
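# Illustrative behaviour (either branch): norm(np.ones((3, 2)), axis=1) returns the
# per-row Euclidean norms (three values of sqrt(2)), even when the installed numpy
# lacks the ``axis`` keyword in np.linalg.norm.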
| bsd-3-clause |
pinellolab/haystack_bio | setup.py | 1 | 2505 | #!/usr/bin/env python
"""Description:
Setup script for haystack_bio -- Epigenetic Variability and Transcription Factor Motifs Analysis Pipeline
@status: beta
@version: $Revision$
@author: Luca Pinello, Rick Farouni
@contact: [email protected]
"""
from setuptools import setup
from haystack.haystack_common import check_required_packages
def main():
setup(
version="0.5.5",
name="haystack_bio",
include_package_data=True,
packages=["haystack"],
package_dir={'haystack': 'haystack'},
entry_points={
"console_scripts": ['haystack_pipeline = haystack.run_pipeline:main',
'haystack_hotspots = haystack.find_hotspots:main',
'haystack_motifs = haystack.find_motifs:main',
'haystack_tf_activity_plane = haystack.generate_tf_activity_plane:main',
'haystack_download_genome = haystack.download_genome:main',
'haystack_run_test = haystack.haystack_common:run_testdata']
},
description="Epigenetic Variability and Transcription Factor Motifs Analysis Pipeline",
maintainer= 'Luca Pinello , Rick Farouni',
maintainer_email='[email protected], [email protected]',
author='Luca Pinello, Rick Farouni',
author_email='[email protected], [email protected]',
url='https://github.com/pinellolab/haystack_bio',
classifiers=[
            'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: MacOS :: MacOS X',
            'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering :: Bio-Informatics',
            'Programming Language :: Python :: 2.7',
],
install_requires=[
'numpy==1.15.4',
'pandas>=0.21.0',
'matplotlib>=2.1.0',
'scipy>=1.0.0',
'jinja2>=2.9.6',
'tqdm>=4.19.4',
'weblogo>=3.5.0',
'bx-python>=0.7.3']
)
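    # Usage note (illustrative): once installed, the entry_points above become
    # shell commands, e.g. ``haystack_pipeline`` or ``haystack_run_test``, each
    # dispatching to the ``module:function`` listed.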
if __name__ == '__main__':
print("Checking Required Packages")
check_required_packages()
print("Installing Package")
main()
| agpl-3.0 |
cainiaocome/scikit-learn | sklearn/externals/joblib/parallel.py | 36 | 34375 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
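# Illustrative behaviour: ``delayed`` only records the call for later execution,
#   delayed(sqrt)(4)  ->  (sqrt, (4,), {})
# and Parallel evaluates each recorded triple as ``func(*args, **kwargs)`` in a worker.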
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
              output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
            If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
            Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
            default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
        max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
        arguments. The main features it brings in addition to
        using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
          - using 'n_jobs=1' makes it possible to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
        * Interruption of multiprocess jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
        called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._pool = None
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it.
self._output = None
self._jobs = list()
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
                # processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print messages only about 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job queue can be filling up as
# we empty it
if hasattr(self, '_lock'):
self._lock.acquire()
job = self._jobs.pop(0)
if hasattr(self, '_lock'):
self._lock.release()
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
try:
self._aborting = True
self._lock.acquire()
if isinstance(exception,
(KeyboardInterrupt, WorkerInterrupt)):
# We have captured a user interruption, clean up
# everything
if hasattr(self, '_pool'):
self._pool.close()
self._pool.terminate()
# We can now allow subprocesses again
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
raise exception
elif isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (
this_report,
exception.message,
)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
raise exception_type(report)
raise exception
finally:
self._lock.release()
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
if n_jobs < 0 and mp is not None:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
self._lock = threading.Lock()
# Whether or not to set an environment flag to track
# multiple process spawning
set_environ_flag = False
if (n_jobs is None or mp is None or n_jobs == 1):
n_jobs = 1
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=2)
elif threading.current_thread().name != 'MainThread':
                # Prevent posix fork in non-main posix threads
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=2)
else:
already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Make sure to free as much memory as possible before forking
gc.collect()
# Set an environment variable to avoid infinite loops
set_environ_flag = True
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
if set_environ_flag:
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while loop.
                # No need to wait for async callbacks to trigger
                # consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output),
len(self._output),
short_format_time(elapsed_time)
))
finally:
if n_jobs > 1:
self._pool.close()
self._pool.terminate() # terminate does a join()
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
strets123/rdkit | rdkit/Chem/Draw/UnitTestSimilarityMaps.py | 3 | 5002 | # $Id$
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Sereina Riniker, Aug 2013
""" unit testing code for molecule drawing
"""
from rdkit import RDConfig
import unittest,os,tempfile
from rdkit import Chem
from rdkit.Chem import Draw
try:
from rdkit.Chem.Draw import SimilarityMaps as sm
except ImportError:
sm = None
from rdkit.RDLogger import logger
logger = logger()
class TestCase(unittest.TestCase):
def setUp(self):
self.mol1 = Chem.MolFromSmiles('c1ccccc1')
self.mol2 = Chem.MolFromSmiles('c1ccncc1')
def testSimilarityMap(self):
# Morgan2 BV
refWeights = [0.5, 0.5, 0.5, -0.5, 0.5, 0.5]
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, radius=2, fpType='bv'))
for w,r in zip(weights, refWeights): self.assertEqual(w, r)
fig, maxWeight = sm.GetSimilarityMapForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, radius=2, fpType='bv'))
self.assertEqual(maxWeight, 0.5)
weights, maxWeight = sm.GetStandardizedWeights(weights)
self.assertEqual(maxWeight, 0.5)
refWeights = [1.0, 1.0, 1.0, -1.0, 1.0, 1.0]
for w,r in zip(weights, refWeights): self.assertEqual(w, r)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, fpType='count'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, fpType='bv', useFeatures=True))
self.assertTrue(weights[3] < 0)
# hashed AP BV
refWeights = [0.09523, 0.17366, 0.17366, -0.23809, 0.17366, 0.17366]
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='bv', nBits=1024))
for w,r in zip(weights, refWeights): self.assertAlmostEqual(w, r, 4)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='normal'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='hashed'))
self.assertTrue(weights[3] < 0)
# hashed TT BV
refWeights = [0.5, 0.5, -0.16666, -0.5, -0.16666, 0.5]
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='bv', nBits=1024, nBitsPerEntry=1))
for w,r in zip(weights, refWeights): self.assertAlmostEqual(w, r, 4)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='normal'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='hashed'))
self.assertTrue(weights[3] < 0)
# RDK fingerprint BV
refWeights = [0.42105, 0.42105, 0.42105, -0.32895, 0.42105, 0.42105]
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetRDKFingerprint(m, i, nBits=1024, nBitsPerHash=1))
for w,r in zip(weights, refWeights): self.assertAlmostEqual(w, r, 4)
if __name__ == '__main__':
try:
import matplotlib
from rdkit.Chem.Draw.mplCanvas import Canvas
except ImportError:
pass
  except RuntimeError: # happens when GTK can't initialize
pass
else:
unittest.main()
| bsd-3-clause |
yyl/btc-price-analysis | parseCSV.py | 1 | 2389 | #!/usr/bin/python
import pandas as pd
import os
import sys
import secrets
import xml.etree.cElementTree as ET
import csv
# config info
os.chdir(secrets.ROOT)
output_dir = "./data"
### method to write news headlines only to stdout
def getHeadline(nyt_path, nyt_news_only_path):
data = pd.read_csv(nyt_path, names=["time","headline"]).iloc[:,1:]
data.to_csv(nyt_news_only_path, header=False, index=False, line_terminator=". ")
### method to merge sentiment score
def mergeSentiScore(nyt_path, nyt_xml_path, merged_output_path):
# parse xml output of sentiment analysis
tree = ET.parse(nyt_xml_path)
root = tree.getroot()
attr_g = (s.attrib for s in root[0][0])
score = pd.DataFrame([attrs for attrs in attr_g])
# lose the id column
score = score.iloc[:,1:]
# read in news data
news_data = pd.read_csv(nyt_path, names=["time","headline"])
# merge and output to csv file
merged = pd.merge(news_data, score, left_index=True, right_index=True, how="left")
merged.to_csv(merged_output_path, index=False)
if __name__ == "__main__":
if len(sys.argv) != 3:
print "usage: python parseCSV.py command_index filename"
print "command_index:"
print " 1: getHeadline()"
print " 2: mergeSentiScore()"
print "filename: the name of input file"
exit()
param = int(sys.argv[1])
## parse and generate paths to file
## nyt_name is the filename of raw news data
nyt_name = sys.argv[2]
## nyt_paths is the path to nyt_name file
nyt_path = os.path.join(output_dir, nyt_name)
## file containing news headline only
nyt_news_only = "news_only_" + nyt_name
## path to nyt_news_only file
nyt_news_only_path = os.path.join(output_dir, nyt_news_only)
## trigger getHeadline function
if param == 1:
getHeadline(nyt_path, nyt_news_only_path)
## trigger mergeSentiScore function
elif param == 2:
## the output file name is the same as the input file, but with additional extension xml
## output file of sentiment score algorithm
nyt_xml_path = os.path.join(output_dir, nyt_news_only + ".xml")
## the path of the merged output
merged_output_path = os.path.join(output_dir, nyt_name.split(".")[0] + "_with_score.csv")
mergeSentiScore(nyt_path, nyt_xml_path, merged_output_path)
| gpl-2.0 |
mhue/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
henryroe/WhatsObservable | whatsobservable.py | 2 | 18643 | import datetime
import ephem
import os.path
import os
import numpy as np
import pdb
from pandas import DataFrame
__version__ = '0.1.2'
class Error(Exception):
pass
def _convert_datetime_to_pyephem_date_string(in_datetime):
return in_datetime.strftime('%Y/%m/%d %H:%M:%S')
def _find_cached_file(filename):
base = os.path.expanduser('~/')
# Look in a few likely locations before doing a giant search
filenames_to_test = [os.path.join(base, filename),
os.path.join(base, 'refdata', filename),
os.path.join(base, 'Dropbox', filename),
os.path.join(base, 'Dropbox', 'refdata', filename)]
for cur_filename in filenames_to_test:
if os.path.isfile(cur_filename):
return cur_filename
# didn't find it, so do a giant search
for root, dirs, files in os.walk(base):
if filename in files:
return os.path.join(root, filename)
return "File Not Found"
def get_latlon_from_observatory_code(code):
if type(code) is int:
code = '%03i' % code
elif type(code) is str:
code = code[:3] # trim any remainder, like @399
try:
obscode_filename = _find_cached_file('ObsCodes.html')
# TODO: add a verbose option to print path to ObsCodes.html
obscodes = open(obscode_filename, 'r').read().splitlines()
except:
raise Error("Problem reading ObsCodes.html file from disk. \n"
"Most likely you need to go download a copy from: \n"
" http://www.minorplanetcenter.net/iau/lists/ObsCodes.html")
curobsline = [a for a in obscodes if a.startswith(code)][0]
output = {'obscode':curobsline[0:3],
'longitude':float(curobsline[4:13]),
'cos':float(curobsline[13:21]),
'sin':float(curobsline[21:30]),
'name':curobsline[30:].strip()}
# From the documentation:
# "The following list gives the observatory code, longitude (in degrees east of Greenwich) and the parallax
# constants (rho cos phi' and rho sin phi', where phi' is the geocentric latitude and rho is the geocentric
# distance in earth radii) for each observatory. It is updated nightly."
output['latitude'] = np.degrees(np.arctan2(output['sin'], output['cos']))
# Unsure where the following comment came from:
# geocentric distance in earth radii:
# output['sin']/np.sin(np.radians(output['latitude']))
# NOTE: while ObsCodes.html is clear about being geocentric, it is unclear what pyephem wants, so blaze ahead
# TODO: confirm whether pyephem wants geocentric
return output
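# Usage sketch (requires a cached copy of ObsCodes.html, see the error text above):
#   get_latlon_from_observatory_code(568)   # 568 is Mauna Kea
#   -> {'obscode': '568', 'name': 'Mauna Kea', 'longitude': ..., 'latitude': ..., ...}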
def pack_mpc_date(in_datetime):
"""
Convert a datetime.date or datetime.datetime object into the MPC packed date format, as described at:
http://www.minorplanetcenter.net/iau/info/PackedDates.html
Copy of the packing definition from the above web page:
Packed Dates
Dates of the form YYYYMMDD may be packed into five characters to conserve space.
The first two digits of the year are packed into a single character in column 1 (I = 18, J = 19, K = 20). Columns 2-3 contain the last two digits of the year. Column 4 contains the month and column 5 contains the day, coded as detailed below:
Month Day Character Day Character
in Col 4 or 5 in Col 4 or 5
Jan. 1 1 17 H
Feb. 2 2 18 I
Mar. 3 3 19 J
Apr. 4 4 20 K
May 5 5 21 L
June 6 6 22 M
July 7 7 23 N
Aug. 8 8 24 O
Sept. 9 9 25 P
Oct. 10 A 26 Q
Nov. 11 B 27 R
Dec. 12 C 28 S
13 D 29 T
14 E 30 U
15 F 31 V
16 G
Examples:
1996 Jan. 1 = J9611
1996 Jan. 10 = J961A
1996 Sept.30 = J969U
1996 Oct. 1 = J96A1
2001 Oct. 22 = K01AM
This system can be extended to dates with non-integral days. The decimal fraction of the day is simply appended to the five characters defined above.
Examples:
1998 Jan. 18.73 = J981I73
2001 Oct. 22.138303 = K01AM138303
"""
if in_datetime.year >= 1800 and in_datetime.year < 1900:
century = 'I'
elif in_datetime.year >= 1900 and in_datetime.year < 2000:
century = 'J'
elif in_datetime.year >= 2000 and in_datetime.year < 2100:
century = 'K'
else:
raise Error("Year is not within 1800-2099: " + in_datetime.isoformat())
year = in_datetime.strftime('%y')
translate = {}
for i in range(10):
translate[i] = str(i)
for i in range(10,32):
translate[i] = chr(ord('A') + i - 10)
month = translate[in_datetime.month]
day = translate[in_datetime.day]
try:
decimaldays = ('%7.5f' % ((in_datetime.hour + (in_datetime.minute / 60.) + (in_datetime.second / 3600.)) / 24.))[2:]
except:
decimaldays = ''
return century + year + month + day + decimaldays
def unpack_mpc_date(in_packed):
"""
Convert a MPC packed date format (as described below) to a datetime.date or datetime.datetime object
http://www.minorplanetcenter.net/iau/info/PackedDates.html
Copy of the packing definition from the above web page:
Packed Dates
Dates of the form YYYYMMDD may be packed into five characters to conserve space.
The first two digits of the year are packed into a single character in column 1 (I = 18, J = 19, K = 20). Columns 2-3 contain the last two digits of the year. Column 4 contains the month and column 5 contains the day, coded as detailed below:
Month Day Character Day Character
in Col 4 or 5 in Col 4 or 5
Jan. 1 1 17 H
Feb. 2 2 18 I
Mar. 3 3 19 J
Apr. 4 4 20 K
May 5 5 21 L
June 6 6 22 M
July 7 7 23 N
Aug. 8 8 24 O
Sept. 9 9 25 P
Oct. 10 A 26 Q
Nov. 11 B 27 R
Dec. 12 C 28 S
13 D 29 T
14 E 30 U
15 F 31 V
16 G
Examples:
1996 Jan. 1 = J9611
1996 Jan. 10 = J961A
1996 Sept.30 = J969U
1996 Oct. 1 = J96A1
2001 Oct. 22 = K01AM
This system can be extended to dates with non-integral days. The decimal fraction of the day is simply appended to the five characters defined above.
Examples:
1998 Jan. 18.73 = J981I73
2001 Oct. 22.138303 = K01AM138303
"""
translate = {}
for i in range(10):
translate[str(i)] = i
for i in range(10,32):
translate[chr(ord('A') + i - 10)] = i
if in_packed[0] == 'I':
year = 1800
elif in_packed[0] == 'J':
year = 1900
elif in_packed[0] == 'K':
year = 2000
else:
raise Error('Unrecognized century code at start of: ' + in_packed)
year += int(in_packed[1:3])
month = translate[in_packed[3]]
day = translate[in_packed[4]]
if len(in_packed) == 5:
return datetime.date(year, month, day)
else:
decimaldays = float('0.' + in_packed[5:])
hour = int(decimaldays * 24.)
minute = int((decimaldays * 24. - hour) * 60.)
second = int(round(decimaldays * 24. * 60. * 60. - (hour * 3600.) - (minute * 60.)))
return datetime.datetime(year, month, day, hour, minute, second)
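# Round-trip illustration built from the docstring examples above:
#   unpack_mpc_date('K01AM')                    ->  datetime.date(2001, 10, 22)
#   pack_mpc_date(datetime.date(2001, 10, 22))  ->  'K01AM'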
#TODO: clean up the following comments and incorporate into the code
# can get all numbered asteroids (and other junk) from minor planet center in MPCORB.DAT file:
# [MPCORB.DAT](http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT)
# [Format is described in more detail](http://www.minorplanetcenter.org/iau/info/MPOrbitFormat.html)
# 944 Hidalgo line as of 2013-07-26 is:
#Des'n H G Epoch M Peri. Node Incl. e n a Reference #Obs #Opp Arc rms Perts Computer
#00944 10.77 0.15 K134I 215.40344 56.65077 21.56494 42.54312 0.6617811 0.07172582 5.7370114 0 MPO263352 582 21 1920-2010 0.77 M-v 38h MPCLINUX 0000 (944) Hidalgo 20100222
# But, I want in xephem format, [described here](http://www.clearskyinstitute.com/xephem/help/xephem.html#mozTocId468501)
# and minor planet provides a subset in xephem format [here](http://www.minorplanetcenter.net/iau/Ephemerides/Bright/2013/Soft03Bright.txt):
# though to ensure I was comparing same exact orbit solutions, used 944 Hidalgo from
# http://www.minorplanetcenter.net/iau/Ephemerides/Distant/Soft03Distant.txt
# From MPO263352
#944 Hidalgo,e,42.5431,21.5649,56.6508,5.737011,0.0717258,0.66178105,215.4034,04/18.0/2013,2000,H10.77,0.15
# So, for my purposes, the xephem format, separated by commas is:
# NUMBER NAME - easy enough....
# e - for ecliptic elliptical orbit
# i = inclination, degrees (directly from MPCORB.DAT)
# O = longitude of ascending node, degrees (directly from MPCORB.DAT)
# o = argument of perihelion, degrees (directly from MPCORB.DAT)
# a = mean distance (aka semi-major axis), AU (directly from MPCORB.DAT)
# n = mean daily motion, degrees per day (computed from a**3/2 if omitted) (directly from MPCORB.DAT)
# e = eccentricity, must be < 1 (directly from MPCORB.DAT)
# M = mean anomaly, i.e., degrees from perihelion (directly from MPCORB.DAT)
# E = epoch date, i.e., time of M MM/DD.D/YYYY
# in MPCORB.DAT epoch date is packed according to rules:
# http://www.minorplanetcenter.net/iau/info/PackedDates.html
# Subfield 10A First date these elements are valid, optional
# SubField 10B Last date these elements are valid, optional
# D = the equinox year, i.e., time of i, O and o (always J2000.0 in MPCORB.DAT, so 2000)
# First component of magnitude model, either g from (g,k) or H from (H,G). Specify which by preceding the number with a "g" or an "H". In absence of either specifier the default is (H,G) model. See Magnitude models.
# corresponds to H in MPCORB.DAT, just need to preface with an 'H'
# Second component of magnitude model, either k or G (directly from MPCORB.DAT)
# s = angular size at 1 AU, arc seconds, optional - I don't care, so skip....
def convert_mpcorb_to_xephem(input):
"""
convert from, e.g.:
[MPCORB.DAT](http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT)
[Format is described in more detail](http://www.minorplanetcenter.org/iau/info/MPOrbitFormat.html)
Des'n H G Epoch M Peri. Node Incl. e n a Reference #Obs #Opp Arc rms Perts Computer
# 944 Hidalgo line as of 2013-07-26 is:
00944 10.77 0.15 K134I 215.40344 56.65077 21.56494 42.54312 0.6617811 0.07172582 5.7370114 0 MPO263352 582 21 1920-2010 0.77 M-v 38h MPCLINUX 0000 (944) Hidalgo 20100222
to
# From MPO263352
944 Hidalgo,e,42.5431,21.5649,56.6508,5.737011,0.0717258,0.66178105,215.4034,04/18.0/2013,2000,H10.77,0.15
input is a single line of text, output will include a newline character within it (but no newline at end)
"""
output = '# From ' + input[107:116] + '\n'
output += input[166:194].strip().replace('(','').replace(')','') + ','
output += 'e,'
output += input[59:68].strip() + ',' # i = inclination, degrees
output += input[48:57].strip() + ',' # O = longitude of ascending node, degrees
output += input[37:46].strip() + ',' # o = argument of perihelion, degrees
output += input[92:103].strip() + ',' # a = mean distance (aka semi-major axis), AU
output += input[80:91].strip() + ',' # n = mean daily motion, degrees per day (computed from a**3/2 if omitted)
output += input[70:79].strip() + ',' # e = eccentricity, must be < 1
output += input[26:35].strip() + ',' # M = mean anomaly, i.e., degrees from perihelion
output += unpack_mpc_date(input[20:25].strip()).strftime('%m/%d/%Y') + ',' # E = epoch date, i.e., time of M
output += '2000,' # D = the equinox year, i.e., time of i, O and o (always J2000.0 in MPCORB.DAT
output += 'H' + input[8:13].strip() + ',' # First component of magnitude model
output += input[14:19].strip() # Second component of magnitude model
return output
def minorplanets(in_datetime, observatory_code,
max_objects=None,
max_magnitude=None, require_magnitude=True,
max_zenithdistance_deg=90.0,
min_heliocentric_distance_AU=None, max_heliocentric_distance_AU=None,
min_topocentric_distance_AU=None, max_topocentric_distance_AU=None):
"""
in_datetime - datetime.datetime(), e.g. datetime.datetime.utcnow()
observatory_code - the Code of the observatory in
http://www.minorplanetcenter.net/iau/lists/ObsCodes.html
can be either string or integer.
max_objects - default is None, otherwise limits the return to this number
of observable objects
max_magnitude - default is None, otherwise limits return to objects
brighter than or equal to this magnitude
(as calculated by PyEphem from the MPC data)
(TODO: confirm whether this is V-band, R-band,
or other...)
require_magnitude - default is True. If False and max_magnitude is None,
then return all objects, whether PyEphem can calculate
a magnitude or not.
max_zenithdistance_deg - default is 90 degrees (horizon)
min/max_heliocentric_distance_AU - defaults are None
min/max_topocentric_distance_AU - defaults are None
"""
obs_info = get_latlon_from_observatory_code(observatory_code)
obs = ephem.Observer()
obs.lat = np.radians(obs_info['latitude'])
obs.lon = np.radians(obs_info['longitude'])
obs.date = _convert_datetime_to_pyephem_date_string(in_datetime)
mpc_filename = _find_cached_file('MPCORB.DAT')
if mpc_filename == 'File Not Found':
raise Error("Problem reading MPCORB.DAT file from disk. \n"
"Most likely you need to go download a copy from: \n"
" http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT")
if max_magnitude is not None:
require_magnitude = True
matching_objects = []
with open(mpc_filename) as f:
in_header = True
for line in f:
if in_header is False and len(line) > 1:
if (not require_magnitude) or (require_magnitude and (line[8:13] != ' ')):
eph = ephem.readdb(convert_mpcorb_to_xephem(line).splitlines()[1])
eph.compute(obs)
if (max_magnitude is None) or (eph.mag <= max_magnitude):
if ((max_zenithdistance_deg is None) or
(np.degrees(np.pi/2. - eph.alt) <= max_zenithdistance_deg)):
if ((min_heliocentric_distance_AU is None) or
(eph.sun_distance >= min_heliocentric_distance_AU)):
if ((max_heliocentric_distance_AU is None) or
(eph.sun_distance <= max_heliocentric_distance_AU)):
if ((min_topocentric_distance_AU is None) or
(eph.earth_distance >= min_topocentric_distance_AU)):
if ((max_topocentric_distance_AU is None) or
(eph.earth_distance <= max_topocentric_distance_AU)):
matching_objects.append(eph)
else:
if line.startswith('-------------------'):
in_header = False
if max_objects is not None:
if len(matching_objects) >= max_objects:
break
name = [a.name for a in matching_objects]
d = {}
d['rise_time'] = [a.rise_time.datetime() if a.rise_time is not None else np.nan for a in matching_objects]
d['transit_time'] = [a.transit_time.datetime() if a.transit_time is not None else np.nan for a in matching_objects]
d['set_time'] = [a.set_time.datetime() if a.set_time is not None else np.nan for a in matching_objects]
d['raJ2000_deg'] = [np.degrees(a.a_ra) for a in matching_objects]
d['decJ2000_deg'] = [np.degrees(a.a_dec) for a in matching_objects]
d['mag'] = [a.mag for a in matching_objects]
d['R_AU'] = [a.sun_distance for a in matching_objects]
d['delta_AU'] = [a.earth_distance for a in matching_objects]
moon = ephem.Moon()
moon.compute(obs.date)
d['O-E-M_deg'] = [np.degrees(ephem.separation(moon, a)) for a in matching_objects]
output = DataFrame(d, index=name)
output = output[['rise_time', 'transit_time', 'set_time', 'raJ2000_deg', 'decJ2000_deg',
'mag', 'R_AU', 'delta_AU', 'O-E-M_deg']] # re-order columns to something sensible
return output
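if __name__ == '__main__':
    # Usage sketch (illustrative values): assumes MPCORB.DAT and ObsCodes.html have
    # already been downloaded and cached somewhere under the home directory.
    observable = minorplanets(datetime.datetime.utcnow(), 568,   # 568: Mauna Kea
                              max_objects=5, max_magnitude=16.0,
                              max_zenithdistance_deg=80.0)
    print(observable[['mag', 'raJ2000_deg', 'decJ2000_deg', 'set_time']])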
| mit |
fyffyt/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
chrisbarber/dask | dask/dataframe/io/tests/test_io.py | 2 | 19842 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from distutils.version import LooseVersion
from threading import Lock
import threading
import dask.array as da
import dask.dataframe as dd
from dask.dataframe.io.io import _meta_from_array
from dask.delayed import Delayed, delayed
from dask.utils import tmpfile
from dask.async import get_sync
from dask.dataframe.utils import assert_eq
####################
# Arrays and BColz #
####################
def test_meta_from_array():
x = np.array([[1, 2], [3, 4]], dtype=np.int64)
res = _meta_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res[0].dtype == np.int64
assert res[1].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index([0, 1]))
x = np.array([[1., 2.], [3., 4.]], dtype=np.float64)
res = _meta_from_array(x, columns=['a', 'b'])
assert isinstance(res, pd.DataFrame)
assert res['a'].dtype == np.float64
assert res['b'].dtype == np.float64
tm.assert_index_equal(res.columns, pd.Index(['a', 'b']))
with pytest.raises(ValueError):
_meta_from_array(x, columns=['a', 'b', 'c'])
np.random.seed(42)
x = np.random.rand(201, 2)
x = dd.from_array(x, chunksize=50, columns=['a', 'b'])
assert len(x.divisions) == 6 # Should be 5 partitions and the end
def test_meta_from_1darray():
x = np.array([1., 2., 3.], dtype=np.float64)
res = _meta_from_array(x)
assert isinstance(res, pd.Series)
assert res.dtype == np.float64
x = np.array([1, 2, 3], dtype=np.object_)
res = _meta_from_array(x, columns='x')
assert isinstance(res, pd.Series)
assert res.name == 'x'
assert res.dtype == np.object_
x = np.array([1, 2, 3], dtype=np.object_)
res = _meta_from_array(x, columns=['x'])
assert isinstance(res, pd.DataFrame)
assert res['x'].dtype == np.object_
tm.assert_index_equal(res.columns, pd.Index(['x']))
with pytest.raises(ValueError):
_meta_from_array(x, columns=['a', 'b'])
def test_meta_from_recarray():
x = np.array([(i, i * 10) for i in range(10)],
dtype=[('a', np.float64), ('b', np.int64)])
res = _meta_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res['a'].dtype == np.float64
assert res['b'].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(['a', 'b']))
res = _meta_from_array(x, columns=['b', 'a'])
assert isinstance(res, pd.DataFrame)
assert res['a'].dtype == np.float64
assert res['b'].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(['b', 'a']))
with pytest.raises(ValueError):
_meta_from_array(x, columns=['a', 'b', 'c'])
def test_from_array():
x = np.arange(10 * 3).reshape(10, 3)
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
tm.assert_index_equal(d.columns, pd.Index([0, 1, 2]))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
d = dd.from_array(x, chunksize=4, columns=list('abc'))
assert isinstance(d, dd.DataFrame)
tm.assert_index_equal(d.columns, pd.Index(['a', 'b', 'c']))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
with pytest.raises(ValueError):
dd.from_array(np.ones(shape=(10, 10, 10)))
def test_from_array_with_record_dtype():
x = np.array([(i, i * 10) for i in range(10)],
dtype=[('a', 'i4'), ('b', 'i4')])
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
assert list(d.columns) == ['a', 'b']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().to_records(index=False) == x).all()
def test_from_bcolz_multiple_threads():
bcolz = pytest.importorskip('bcolz')
def check():
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) ==
sorted(dd.from_bcolz(t, chunksize=2).dask))
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) !=
sorted(dd.from_bcolz(t, chunksize=3).dask))
threads = []
for i in range(5):
thread = threading.Thread(target=check)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def test_from_bcolz():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
L = list(d.index.compute(get=get_sync))
assert L == [0, 1, 2]
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) ==
sorted(dd.from_bcolz(t, chunksize=2).dask))
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) !=
sorted(dd.from_bcolz(t, chunksize=3).dask))
dsk = dd.from_bcolz(t, chunksize=3).dask
t.append((4, 4., 'b'))
t.flush()
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) !=
sorted(dsk))
def test_from_bcolz_no_lock():
bcolz = pytest.importorskip('bcolz')
locktype = type(Lock())
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'], chunklen=2)
a = dd.from_bcolz(t, chunksize=2)
b = dd.from_bcolz(t, chunksize=2, lock=True)
c = dd.from_bcolz(t, chunksize=2, lock=False)
assert_eq(a, b)
assert_eq(a, c)
assert not any(isinstance(item, locktype)
for v in c.dask.values()
for item in v)
def test_from_bcolz_filename():
bcolz = pytest.importorskip('bcolz')
with tmpfile('.bcolz') as fn:
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'],
rootdir=fn)
t.flush()
d = dd.from_bcolz(fn, chunksize=2)
assert list(d.x.compute()) == [1, 2, 3]
def test_from_bcolz_column_order():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
df = dd.from_bcolz(t, chunksize=2)
assert list(df.loc[0].compute().columns) == ['x', 'y', 'a']
def test_from_pandas_dataframe():
a = list('aaaaaaabbbbbbbbccccccc')
df = pd.DataFrame(dict(a=a, b=np.random.randn(len(a))),
index=pd.date_range(start='20120101', periods=len(a)))
ddf = dd.from_pandas(df, 3)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert isinstance(ddf.divisions[0], type(df.index[0]))
tm.assert_frame_equal(df, ddf.compute())
ddf = dd.from_pandas(df, chunksize=8)
msg = 'Exactly one of npartitions and chunksize must be specified.'
with tm.assertRaisesRegexp(ValueError, msg):
dd.from_pandas(df, npartitions=2, chunksize=2)
with tm.assertRaisesRegexp((ValueError, AssertionError), msg):
dd.from_pandas(df)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert isinstance(ddf.divisions[0], type(df.index[0]))
tm.assert_frame_equal(df, ddf.compute())
def test_from_pandas_small():
df = pd.DataFrame({'x': [1, 2, 3]})
for i in [1, 2, 30]:
a = dd.from_pandas(df, i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
a = dd.from_pandas(df, chunksize=i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
for sort in [True, False]:
for i in [0, 2]:
df = pd.DataFrame({'x': [0] * i})
ddf = dd.from_pandas(df, npartitions=5, sort=sort)
assert_eq(df, ddf)
s = pd.Series([0] * i, name='x')
ds = dd.from_pandas(s, npartitions=5, sort=sort)
assert_eq(s, ds)
@pytest.mark.xfail(reason="")
def test_from_pandas_npartitions_is_accurate():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
for n in [1, 2, 4, 5]:
assert dd.from_pandas(df, npartitions=n).npartitions == n
def test_from_pandas_series():
n = 20
s = pd.Series(np.random.randn(n),
index=pd.date_range(start='20120101', periods=n))
ds = dd.from_pandas(s, 3)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert isinstance(ds.divisions[0], type(s.index[0]))
tm.assert_series_equal(s, ds.compute())
ds = dd.from_pandas(s, chunksize=8)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert isinstance(ds.divisions[0], type(s.index[0]))
tm.assert_series_equal(s, ds.compute())
def test_from_pandas_non_sorted():
df = pd.DataFrame({'x': [1, 2, 3]}, index=[3, 1, 2])
ddf = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf.known_divisions
assert_eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2, sort=False)
assert not ddf.known_divisions
assert_eq(df, ddf)
def test_from_pandas_single_row():
df = pd.DataFrame({'x': [1]}, index=[1])
ddf = dd.from_pandas(df, npartitions=1)
assert ddf.divisions == (1, 1)
assert_eq(ddf, df)
def test_from_pandas_with_datetime_index():
df = pd.DataFrame({"Date": ["2015-08-28", "2015-08-27", "2015-08-26",
"2015-08-25", "2015-08-24", "2015-08-21",
"2015-08-20", "2015-08-19", "2015-08-18"],
"Val": list(range(9))})
df.Date = df.Date.astype('datetime64')
ddf = dd.from_pandas(df, 2)
assert_eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2)
assert_eq(df, ddf)
def test_DataFrame_from_dask_array():
x = da.ones((10, 3), chunks=(4, 2))
df = dd.from_dask_array(x, ['a', 'b', 'c'])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df.columns, pd.Index(['a', 'b', 'c']))
assert list(df.divisions) == [0, 4, 8, 9]
assert (df.compute(get=get_sync).values == x.compute(get=get_sync)).all()
# dd.from_array should re-route to from_dask_array
df2 = dd.from_array(x, columns=['a', 'b', 'c'])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df2.columns, df.columns)
assert df2.divisions == df.divisions
def test_Series_from_dask_array():
x = da.ones(10, chunks=4)
ser = dd.from_dask_array(x, 'a')
assert isinstance(ser, dd.Series)
assert ser.name == 'a'
assert list(ser.divisions) == [0, 4, 8, 9]
assert (ser.compute(get=get_sync).values == x.compute(get=get_sync)).all()
ser = dd.from_dask_array(x)
assert isinstance(ser, dd.Series)
assert ser.name is None
# dd.from_array should re-route to from_dask_array
ser2 = dd.from_array(x)
assert isinstance(ser2, dd.Series)
assert_eq(ser, ser2)
def test_from_dask_array_compat_numpy_array():
x = da.ones((3, 3, 3), chunks=2)
with pytest.raises(ValueError):
dd.from_dask_array(x) # dask
with pytest.raises(ValueError):
dd.from_array(x.compute()) # numpy
x = da.ones((10, 3), chunks=(3, 3))
d1 = dd.from_dask_array(x) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index([0, 1, 2]))
d2 = dd.from_array(x.compute()) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index([0, 1, 2]))
with pytest.raises(ValueError):
dd.from_dask_array(x, columns=['a']) # dask
with pytest.raises(ValueError):
dd.from_array(x.compute(), columns=['a']) # numpy
d1 = dd.from_dask_array(x, columns=['a', 'b', 'c']) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(['a', 'b', 'c']))
d2 = dd.from_array(x.compute(), columns=['a', 'b', 'c']) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(['a', 'b', 'c']))
def test_from_dask_array_compat_numpy_array_1d():
x = da.ones(10, chunks=3)
d1 = dd.from_dask_array(x) # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name is None
d2 = dd.from_array(x.compute()) # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name is None
d1 = dd.from_dask_array(x, columns='name') # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name == 'name'
d2 = dd.from_array(x.compute(), columns='name') # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name == 'name'
# passing list via columns results in DataFrame
d1 = dd.from_dask_array(x, columns=['name']) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(['name']))
d2 = dd.from_array(x.compute(), columns=['name']) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(['name']))
def test_from_dask_array_struct_dtype():
x = np.array([(1, 'a'), (2, 'b')], dtype=[('a', 'i4'), ('b', 'object')])
y = da.from_array(x, chunks=(1,))
df = dd.from_dask_array(y)
tm.assert_index_equal(df.columns, pd.Index(['a', 'b']))
assert_eq(df, pd.DataFrame(x))
assert_eq(dd.from_dask_array(y, columns=['b', 'a']),
pd.DataFrame(x, columns=['b', 'a']))
def test_to_castra():
castra = pytest.importorskip('castra')
blosc = pytest.importorskip('blosc')
if (LooseVersion(blosc.__version__) == '1.3.0' or
LooseVersion(castra.__version__) < '0.1.8'):
pytest.skip()
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
b = c.to_dask()
try:
tm.assert_frame_equal(df, c[:])
tm.assert_frame_equal(b.compute(), df)
finally:
c.drop()
c = a.to_castra(categories=['x'])
try:
assert c[:].dtypes['x'] == 'category'
finally:
c.drop()
c = a.to_castra(sorted_index_column='y')
try:
tm.assert_frame_equal(c[:], df.set_index('y'))
finally:
c.drop()
delayed = a.to_castra(compute=False)
assert isinstance(delayed, Delayed)
c = delayed.compute()
try:
tm.assert_frame_equal(c[:], df)
finally:
c.drop()
# make sure compute=False preserves the same interface
c1 = a.to_castra(compute=True)
c2 = a.to_castra(compute=False).compute()
try:
tm.assert_frame_equal(c1[:], c2[:])
finally:
c1.drop()
c2.drop()
def test_from_castra():
castra = pytest.importorskip('castra')
blosc = pytest.importorskip('blosc')
if (LooseVersion(blosc.__version__) == '1.3.0' or
LooseVersion(castra.__version__) < '0.1.8'):
pytest.skip()
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
with_castra = dd.from_castra(c)
with_fn = dd.from_castra(c.path)
with_columns = dd.from_castra(c, 'x')
try:
tm.assert_frame_equal(df, with_castra.compute())
tm.assert_frame_equal(df, with_fn.compute())
tm.assert_series_equal(df.x, with_columns.compute())
finally:
        # Calling c.drop() races with the drop triggered from `with_fn.__del__`.
        # Manually `del`ing gets around this.
del with_fn, c
def test_from_castra_with_selection():
""" Optimizations fuse getitems with load_partitions
We used to use getitem for both column access and selections
"""
castra = pytest.importorskip('castra')
blosc = pytest.importorskip('blosc')
if (LooseVersion(blosc.__version__) == '1.3.0' or
LooseVersion(castra.__version__) < '0.1.8'):
pytest.skip()
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
b = dd.from_castra(a.to_castra())
assert_eq(b[b.y > 3].x, df[df.y > 3].x)
def test_to_bag():
pytest.importorskip('dask.bag')
a = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
ddf = dd.from_pandas(a, 2)
assert ddf.to_bag().compute() == list(a.itertuples(False))
assert ddf.to_bag(True).compute() == list(a.itertuples(True))
assert ddf.x.to_bag(True).compute() == list(a.x.iteritems())
assert ddf.x.to_bag().compute() == list(a.x)
def test_to_records():
pytest.importorskip('dask.array')
from dask.array.utils import assert_eq
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
ddf = dd.from_pandas(df, 2)
assert_eq(df.to_records(), ddf.to_records())
def test_from_delayed():
df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list('abcd'))
parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]
dfs = [delayed(parts.__getitem__)(i) for i in range(4)]
meta = dfs[0].compute()
my_len = lambda x: pd.Series([len(x)])
for divisions in [None, [0, 1, 3, 6, 10]]:
ddf = dd.from_delayed(dfs, meta=meta, divisions=divisions)
assert_eq(ddf, df)
assert list(ddf.map_partitions(my_len).compute()) == [1, 2, 3, 4]
assert ddf.known_divisions == (divisions is not None)
s = dd.from_delayed([d.a for d in dfs], meta=meta.a,
divisions=divisions)
assert_eq(s, df.a)
assert list(s.map_partitions(my_len).compute()) == [1, 2, 3, 4]
assert ddf.known_divisions == (divisions is not None)
with pytest.raises(ValueError):
dd.from_delayed(dfs, meta=meta, divisions=[0, 1, 3, 6])
def test_from_delayed_sorted():
a = pd.DataFrame({'x': [1, 2]}, index=[1, 10])
b = pd.DataFrame({'x': [4, 1]}, index=[100, 200])
A = dd.from_delayed([delayed(a), delayed(b)], divisions='sorted')
assert A.known_divisions
assert A.divisions == (1, 100, 200)
def test_to_delayed():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
a, b = ddf.to_delayed()
assert isinstance(a, Delayed)
assert isinstance(b, Delayed)
assert_eq(a.compute(), df.iloc[:2])
| bsd-3-clause |
BlueBrain/deap | examples/es/cma_plotting.py | 12 | 4326 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import cma
from deap import creator
from deap import tools
import matplotlib.pyplot as plt
# Problem size
N = 10
NGEN = 125
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)
def main(verbose=True):
# The cma module uses the numpy random number generator
numpy.random.seed(64)
# The CMA-ES algorithm takes a population of one individual as argument
    # The centroid is set to a vector of 5.0; see http://www.lri.fr/~hansen/cmaes_inmatlab.html
    # for more details about the rastrigin and other test functions for CMA-ES
strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
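    # lambda_ is the number of offspring sampled each generation (20*N = 200 here),
    # which is why the x-axes below count evaluations in steps of strategy.lambda_.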
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
halloffame = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
# Objects that will compile the data
sigma = numpy.ndarray((NGEN,1))
axis_ratio = numpy.ndarray((NGEN,1))
diagD = numpy.ndarray((NGEN,N))
fbest = numpy.ndarray((NGEN,1))
best = numpy.ndarray((NGEN,N))
std = numpy.ndarray((NGEN,N))
for gen in range(NGEN):
# Generate a new population
population = toolbox.generate()
# Evaluate the individuals
fitnesses = toolbox.map(toolbox.evaluate, population)
for ind, fit in zip(population, fitnesses):
ind.fitness.values = fit
# Update the strategy with the evaluated individuals
toolbox.update(population)
# Update the hall of fame and the statistics with the
# currently evaluated population
halloffame.update(population)
record = stats.compile(population)
logbook.record(evals=len(population), gen=gen, **record)
if verbose:
print(logbook.stream)
        # Save more data along the evolution for later plotting
        # diagD is sorted and square-rooted in the update method
sigma[gen] = strategy.sigma
axis_ratio[gen] = max(strategy.diagD)**2/min(strategy.diagD)**2
diagD[gen, :N] = strategy.diagD**2
fbest[gen] = halloffame[0].fitness.values
best[gen, :N] = halloffame[0]
std[gen, :N] = numpy.std(population, axis=0)
# The x-axis will be the number of evaluations
x = list(range(0, strategy.lambda_ * NGEN, strategy.lambda_))
avg, max_, min_ = logbook.select("avg", "max", "min")
plt.figure()
plt.subplot(2, 2, 1)
plt.semilogy(x, avg, "--b")
plt.semilogy(x, max_, "--b")
plt.semilogy(x, min_, "-b")
plt.semilogy(x, fbest, "-c")
plt.semilogy(x, sigma, "-g")
plt.semilogy(x, axis_ratio, "-r")
plt.grid(True)
plt.title("blue: f-values, green: sigma, red: axis ratio")
plt.subplot(2, 2, 2)
plt.plot(x, best)
plt.grid(True)
plt.title("Object Variables")
plt.subplot(2, 2, 3)
plt.semilogy(x, diagD)
plt.grid(True)
plt.title("Scaling (All Main Axes)")
plt.subplot(2, 2, 4)
plt.semilogy(x, std)
plt.grid(True)
plt.title("Standard Deviations in All Coordinates")
plt.show()
if __name__ == "__main__":
main(False)
| lgpl-3.0 |
DTMilodowski/LiDAR_canopy | src/scottish_understory/SEOS_lidar_summary_figures.py | 1 | 5222 | """
SEOS_lidar_summary_figures.py
--------------------------------------------------------------------------------
Code to generate a suite of figures summarising the results from the SEOS
UAV LiDAR surveys.
Analysis focussed on site 1
Encompasses:
- Canopy density profiles, inverted from point clouds
- a random selection of 5m x 5m resolution profiles
- Sensitivity analysis to spatial resolution (1 ha)
- Sensitivity analysis to pulse density (1 ha)
- Sensitivity analysis to pulse density (5m x 5m individual profiles)
- 3D mapping of canopy density
--------------------------------------------------------------------------------
"""
# import standard libraries
import os
import numpy as np
import xarray as xr
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
# import general LiDAR libraries
import sys
sys.path.append('../')
import LiDAR_io as io
import LiDAR_MacHorn_LAD_profiles as LAD
# import SEOS specific libraries
import SEOS_sensitivity_analysis_plots as splt
import SEOS_profile_plots as lplt
"""
PART A: LOAD DATA
--------------------------------------------------------------------------------
Load in:
LiDAR data
Sensitivity analyses results
--------------------------------------------------------------------------------
"""
# Define paths etc
file_id = 'carbomap_site1_5m'
path2data = '/exports/csce/datastore/geos/users/dmilodow/FOREST2020/LiDAR/carbomap_highlands/canopy_metrics/'
path2fig = '/exports/csce/datastore/geos/users/dmilodow/FOREST2020/LiDAR/carbomap_highlands/figures/'
os.system('mkdir %s/sensitivity_analysis' % path2fig)
las_list = '../las_lists/carbomap_site1_lastiles.txt'
pad_file = '%s%s_pad.tif' % (path2data,file_id)
# Load LiDAR data by bounding box
N=6206000.
S=6205900.
W=344520.
E=344620.
plot_bbox = np.asarray([[W,N],[E,N],[E,S],[W,S]])
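# plot_bbox lists the corners of the 100 m x 100 m focal plot in survey
# coordinates, ordered NW, NE, SE, SW.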
pts, starting_ids, trees = io.load_lidar_data_by_polygon(las_list,plot_bbox,laz_files=False,max_pts_per_tree = 5*10**5)
N_trees = len(trees)
# filter LiDAR data by return classification
pts[np.any((pts[:,4]==3,pts[:,4]==4,pts[:,4]==5),axis=0),4]=1
pts[pts[:,2]<0,2]=0
# Load sensitivity analysis to spatial resolution
resolution_sensitivity = np.load('SEOS_MH_sensitivity_resolution.npy')[()]
density_sensitivity = np.load('SEOS_MH_sensitivity_pulse_density.npy')[()]
# Load 3D maps of canopy density
pad = xr.open_rasterio(pad_file)
# other parameters
max_height = 40.
layer_thickness = 0.5
heights = np.arange(0.,max_height,layer_thickness)+0.5
n_layers = heights.size
plot_width = 100.
sample_res = 5.
kappa = 1.
n_iter = 100
# bin lidar returns
heights,first_return_profile,n_ground_returns = LAD.bin_returns(pts, max_height, layer_thickness)
"""
PART B: Plot canopy profiles
--------------------------------------------------------------------------------
A couple of plots here
1) point cloud transect; 1 ha vertical point cloud distributions; 1 ha average
canopy profile plus bootstrap 95% CI
2) Random sample of individual 5m x 5m profiles from within the 1 ha plot
--------------------------------------------------------------------------------
"""
lplt.plot_point_cloud_and_profiles('%s/SEOS_point_cloud_canopy_profiles.png' % path2fig,
pts,resolution_sensitivity['5m'],heights,first_return_profile)
"""
PART C: Sensitivity analysis
--------------------------------------------------------------------------------
Three plots here
1) Sensitivity of 1ha average to spatial resolution
2) Sensitivity of 1ha average to pulse density
3) Sensitivity of individual profiles to pulse density
--------------------------------------------------------------------------------
"""
# 1 ha average profile sensitivity to spatial resolution
splt.plot_profile_sensitivity_density('%s/sensitivity_analysis/SEOS_sensitivity_analysis_resolution.png' % path2fig,
heights,resolution_sensitivity)
# 1 ha average profile sensitivity to point density
splt.plot_profile_sensitivity_density('%s/sensitivity_analysis/SEOS_sensitivity_analysis_density.png' % path2fig,
heights,density_sensitivity)
# 5m x 5m profiles sensitivity to point density
N = 10
for ii in range(0, N):
profile_idx = np.random.randint(density_sensitivity['20'].shape[1])
splt.plot_profile_sensitivity_density_individual_profile('%s/sensitivity_analysis/SEOS_sensitivity_analysis_density_individual_5m_profile_%i.png' % (path2fig,profile_idx),
heights,density_sensitivity,profile_idx=profile_idx)
"""
PART D: 3D map of plant area density
--------------------------------------------------------------------------------
"""
p = pad.plot(x='x',y='y',col='band',col_wrap=8,vmin=0,vmax=1,cmap='plasma',
cbar_kwargs={'label':'PAI for each 1m slice in canopy'})
for a in p.axes.flat:
a.set_aspect('equal')
a.set_xlabel(None)
a.set_ylabel(None)
a.set_title(None)
a.set_xticklabels(a.get_xticklabels(),visible=False)
a.set_yticklabels(a.get_yticklabels(),visible=False)
p.axes.flat[-1].plot([344672.5,345172.5],[6205442.5,6205442.5],'-',color='black',linewidth='2')
p.axes.flat[-1].annotate('500 m',xy=(344922.5,6205392.5), xycoords='data',
backgroundcolor='none',ha='center', va='top')
plt.savefig('%s%s_pai_by_canopy_layer.png' % (path2fig,file_id))
plt.show()
| gpl-3.0 |
Snazz2001/BDA_py_demos | demos_ch3/demo3_1.py | 19 | 3966 | """Bayesian Data Analysis, 3rd ed
Chapter 3, demo 1
Visualise the joint density and marginal densities of the posterior of a normal
distribution with unknown mean and variance.
"""
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# import from utilities
import os
util_path = '../utilities_and_data' # provide path to utilities
util_path = os.path.abspath(util_path)
if util_path not in os.sys.path and os.path.exists(util_path):
os.sys.path.insert(0, util_path)
import sinvchi2
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8')
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# data
y = np.array([93, 112, 122, 135, 122, 150, 118, 90, 124, 114])
# sufficient statistics
n = len(y)
s2 = np.var(y, ddof=1) # Here ddof=1 is used to get the sample estimate.
my = np.mean(y)
# Factorize the joint posterior p(mu,sigma2|y) to p(sigma2|y)p(mu|sigma2,y)
# Sample from the joint posterior using this factorization
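# The two conditionals sampled below are (BDA3, Sec. 3.2):
#   sigma2 | y ~ Scaled-Inv-chi2(n - 1, s2)
#   mu | sigma2, y ~ N(mean(y), sigma2 / n)
# so a joint draw is obtained by drawing sigma2 first and then mu given it.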
# sample from p(sigma2|y)
sigma2 = sinvchi2.rvs(n-1, s2, size=1000)
# sample from p(mu|sigma2,y)
mu = my + np.sqrt(sigma2/n)*np.random.randn(*sigma2.shape)
# display sigma instead of sigma2
sigma = np.sqrt(sigma2)
# For mu compute the density in these points
tl1 = [90, 150]
t1 = np.linspace(tl1[0], tl1[1], 1000)
# For sigma compute the density in these points
tl2 = [10, 60]
t2 = np.linspace(tl2[0], tl2[1], 1000)
# evaluate the joint density in a grid
# note that the following is not normalized, but for plotting
# contours it does not matter
Z = stats.norm.pdf(t1, my, t2[:,np.newaxis]/np.sqrt(n))
Z *= (sinvchi2.pdf(t2**2, n-1, s2)*2*t2)[:,np.newaxis]
# compute the exact marginal density for mu
# multiplication by 1./sqrt(s2/n) is due to the transformation of variable
# z=(x-mean(y))/sqrt(s2/n), see BDA3 p. 21
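# (In general, if x = m + c*z with z ~ p_z and c > 0, then p_x(x) = p_z((x-m)/c)/c;
# here m = mean(y), c = sqrt(s2/n) and z is Student-t with n-1 degrees of freedom.)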
pm_mu = stats.t.pdf((t1 - my) / np.sqrt(s2/n), n-1) / np.sqrt(s2/n)
# estimate the marginal density for mu using samples and an ad hoc
# Gaussian kernel approximation
pk_mu = stats.gaussian_kde(mu).evaluate(t1)
# compute the exact marginal density for sigma
# multiplication by 2*t2 is due to the transformation of variable
# z=t2^2, see BDA3 p. 21
pm_sigma = sinvchi2.pdf(t2**2, n-1, s2)*2*t2
# N.B. this was already calculated in the joint distribution case
# estimate the marginal density for sigma using samples and an ad hoc Gaussian
# kernel approximation
pk_sigma = stats.gaussian_kde(sigma).evaluate(t2)
# ====== Plotting
# create figure
plotgrid = gridspec.GridSpec(2, 2, width_ratios=[3,2], height_ratios=[3,2])
plt.figure(figsize=(12,12))
# plot the joint distribution
plt.subplot(plotgrid[0,0])
# plot the contour plot of the exact posterior (c_levels is used to give
# a vector of linearly spaced values at which contours are drawn)
c_levels = np.linspace(1e-5, Z.max(), 6)[:-1]
plt.contour(t1, t2, Z, c_levels, colors='blue')
# plot the samples from the joint posterior
samps = plt.scatter(mu, sigma, 5, color=[0.25, 0.75, 0.25])
# decorate
plt.xlim(tl1)
plt.ylim(tl2)
plt.xlabel('$\mu$', fontsize=20)
plt.ylabel('$\sigma$', fontsize=20)
plt.title('joint posterior')
plt.legend(
(plt.Line2D([], [], color='blue'), samps),
('exact contour plot', 'samples')
)
# plot the marginal of mu
plt.subplot(plotgrid[1,0])
# empirical
plt.plot(t1, pk_mu, color='#ff8f20', linewidth=2.5, label='empirical')
# exact
plt.plot(t1, pm_mu, 'k--', linewidth=1.5, label='exact')
# decorate
plt.xlim(tl1)
plt.title('marginal of $\mu$')
plt.yticks(())
plt.legend()
# plot the marginal of sigma
plt.subplot(plotgrid[0,1])
# empirical
plt.plot(pk_sigma, t2, color='#ff8f20', linewidth=2.5, label='empirical')
# exact
plt.plot(pm_sigma, t2, 'k--', linewidth=1.5, label='exact')
# decorate
plt.ylim(tl2)
plt.title('marginal of $\sigma$')
plt.xticks(())
plt.legend()
plt.show()
| gpl-3.0 |
jwheatp/eiffelometre | train.py | 1 | 1365 | # imports
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.cross_validation import KFold
# import data
# it's a small dataset so we can load it completely
df = pd.read_csv('db_june_dayhourcount', sep=",")
# weather data
df_weather = pd.read_csv('db_june_weather', sep=",")
weather = []
for row in df_weather.iterrows() :
index, data = row
weather.extend(data.tolist())
# insert weather column in dataframe
df["weather"] = weather
# create two variables X (model input) and y (model output) for the model
X = df[["weekday","hour","weather"]].as_matrix()
y = df[["count"]].as_matrix()
y = np.ravel(y)
y = y.astype(float)
# normalize y between 0 and 1 (strictly)
y = (y-min(y))/(max(y)+1-min(y))
# create bins for a discrete frequentation scale
bins_5 = np.array([0,0.2,0.4,0.6,0.8,1])
bins_4 = np.array([0,0.25,0.5,0.75,1])
bins_3 = np.array([0,0.33,0.66,1])
# here we use bins_5
y = np.digitize(y, bins_5)
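# Illustrative example (values made up, not from the dataset): with bins_5 above,
# np.digitize([0.05, 0.45, 0.95], bins_5) gives array([1, 3, 5]), i.e. each
# normalized count is mapped to one of five ordinal classes 1..5.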
n = len(y)
# SVM/SVC model
clf = SVC()
# use 5-fold cross-validation to test the model accuracy
kf = KFold(n, n_folds=5, shuffle=True)
scores = []
for train, test in kf:
X_train = [X[i] for i in train]
y_train = [y[i] for i in train]
clf.fit(X_train,y_train)
X_test = [X[i] for i in test]
y_test = [y[i] for i in test]
scores.append(clf.score(X_test,y_test))
# print mean accuracy
print(np.mean(scores))
| mit |
flaviovdf/aflux | aflux/dataio.py | 1 | 3090 | #-*- coding: utf8
from __future__ import division, print_function
from collections import defaultdict
from collections import OrderedDict
import numpy as np
import pandas as pd
def save_model(out_fpath, model):
store = pd.HDFStore(out_fpath, 'w')
for model_key in model:
model_val = model[model_key]
if type(model_val) == np.ndarray:
store[model_key] = pd.DataFrame(model_val)
else:
store[model_key] = pd.DataFrame(model_val.items(), \
columns=['Name', 'Id'])
store.close()
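# Illustrative call (names are hypothetical, not defined in this module):
# save_model('trained.h5', {'Theta_zh': Theta_zh, 'hyper2id': hyper2id})
# writes each ndarray under its key as a DataFrame and each id-mapping as a
# two-column (Name, Id) table in the same HDF5 file.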
def initialize_trace(trace_fpath, num_topics, burn_in):
count_zh_dict = defaultdict(int)
count_sz_dict = defaultdict(int)
count_dz_dict = defaultdict(int)
count_z_dict = defaultdict(int)
count_h_dict = defaultdict(int)
hyper2id = OrderedDict()
source2id = OrderedDict()
dest2id = OrderedDict()
Trace = []
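    # each input line holds a (hyper, source, destination, count) record; the
    # count is expanded into individual events, each assigned a random initial
    # topic z, while the count matrices used by the sampler are accumulated.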
with open(trace_fpath, 'r') as trace_file:
for i, line in enumerate(trace_file):
hyper_str, source_str, dest_str, c = line.strip().split('\t')
c = int(c)
for _ in xrange(c):
if hyper_str not in hyper2id:
hyper2id[hyper_str] = len(hyper2id)
if source_str not in source2id:
source2id[source_str] = len(source2id)
if dest_str not in dest2id:
dest2id[dest_str] = len(dest2id)
h = hyper2id[hyper_str]
s = source2id[source_str]
d = dest2id[dest_str]
z = np.random.randint(num_topics)
count_zh_dict[z, h] += 1
count_sz_dict[s, z] += 1
count_dz_dict[d, z] += 1
count_z_dict[z] += 1
count_h_dict[h] += 1
Trace.append([h, s, d, z])
Trace = np.asarray(Trace, dtype='i4', order='C')
nh = len(hyper2id)
ns = len(source2id)
nd = len(dest2id)
nz = num_topics
Count_zh = np.zeros(shape=(nz, nh), dtype='i4')
Count_sz = np.zeros(shape=(ns, nz), dtype='i4')
Count_dz = np.zeros(shape=(nd, nz), dtype='i4')
count_h = np.zeros(shape=(nh,), dtype='i4')
count_z = np.zeros(shape=(nz,), dtype='i4')
for z in xrange(Count_zh.shape[0]):
count_z[z] = count_z_dict[z]
for h in xrange(Count_zh.shape[1]):
count_h[h] = count_h_dict[h]
Count_zh[z, h] = count_zh_dict[z, h]
for s in xrange(Count_sz.shape[0]):
Count_sz[s, z] = count_sz_dict[s, z]
for d in xrange(Count_dz.shape[0]):
Count_dz[d, z] = count_dz_dict[d, z]
prob_topics_aux = np.zeros(nz, dtype='f8')
Theta_zh = np.zeros(shape=(nz, nh), dtype='f8')
Psi_sz = np.zeros(shape=(ns, nz), dtype='f8')
Psi_dz = np.zeros(shape=(nd, nz), dtype='f8')
return Trace, Count_zh, Count_sz, Count_dz, count_h, count_z, \
prob_topics_aux, Theta_zh, Psi_sz, Psi_dz, hyper2id, source2id, dest2id
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_ElPPlShear/Area/Normal_Stress_Plot.py | 6 | 4510 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
plt.style.use('grayscale')
###############################################################
## Area = 1 m^2
###############################################################
# Go over each feioutput and plot each one.
thefile = "A_1/Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain,normal_stress/1000,label=r'Area = $1 m^2$', Linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Area = 1e^2 m^2
###############################################################
# Go over each feioutput and plot each one.
thefile = "A_1e2/Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain,normal_stress/1000,label=r'Area = $1e^2 m^2$', Linewidth=4, markersize=20)
# plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
###############################################################
## Area = 1e^-2 m^2
###############################################################
# Go over each feioutput and plot each one.
thefile = "A_1e-2/Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain,normal_stress/1000,label=r'Area = $1e^{-2} m^2$', Linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
###############################################################
## Area = 1e^-4 m^2
###############################################################
# Go over each feioutput and plot each one.
thefile = "A_1e-4/Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain,normal_stress/1000,label=r'Area = $1e^{-4} m^2$', Linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
paula-tataru/SpikeyTree | src/optimize.py | 1 | 8262 | # This file is part of SpikeyTree.
# Copyright (C) 2015 Paula Tataru
# SpikeyTree is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import os
import time
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import numpy.random as nprand
import scipy.optimize as scioptim
from tree import FullTree
from tree import VisitorLikelihood
import utils
class Optim:
'''Optimize and store extra logging information'''
def __init__(self, rep, info_tree, mutex, param):
self.mutex = mutex
# current likelihood
self.curr_lk = 0
# number of function evaluations and iterations
self.no_eval = 1
self.no_iter = 1
# current replication
self.curr_rep = rep
self.lk = VisitorLikelihood()
self.tree = FullTree(info_tree[0], info_tree[1], info_tree[2],
info_tree[3], info_tree[4])
# parameters
self.init = np.array(param)
self.init_tran = utils.to_inf(self.init)
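        # The optimization runs on an unconstrained scale: utils.to_inf maps the
        # model parameters onto it here, and utils.to_pos (in get_lk and after the
        # fit) maps points back to their natural scale before evaluation.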
def run(self):
self.mutex.acquire()
print 'Starting repetition ', self.curr_rep
self.mutex.release()
start_time = time.time()
optim = scioptim.minimize(self.get_lk, self.init_tran, (),
'L-BFGS-B', callback=self.status)
end_time = time.time()
running_time = int(end_time - start_time)
to_print = ('Finished repetition %d with lk %g in %g min and %g sec'
% (self.curr_rep, -optim.fun,
running_time / 60, running_time % 60))
if not optim.success:
to_print = ('%s\nOptimization did not converge: %s'
% (to_print, optim.message))
self.mutex.acquire()
print to_print
self.mutex.release()
optim.x = utils.to_pos(optim.x)
return optim, self.init, self.no_iter, self.no_eval
def get_lk(self, param):
untran = utils.to_pos(param)
self.tree.update(untran)
self.curr_lk = -self.lk.get_lk(self.tree)
self.no_eval += 1
return self.curr_lk
def status(self, _):
to_print = ('Iteration %d.%3d.%4d:\t%6.15f' %
(self.curr_rep, self.no_iter, self.no_eval, -self.curr_lk))
self.mutex.acquire()
print to_print
self.mutex.release()
self.no_iter += 1
def threaded_optimize(mutex, runs_queue, optim_queue, info_tree):
    '''call optimization from queue with initial parameters,
    until the sentinel None is encountered'''
while True:
this_run = runs_queue.get()
if this_run is None:
runs_queue.task_done()
break
try:
opt = Optim(this_run[0]+1, info_tree, mutex, this_run[1])
this_optim, init_param, no_iter, no_eval = opt.run()
optim_queue.put([this_run[0], this_optim, init_param,
no_iter, no_eval])
except Exception as inst:
mutex.acquire()
print 'Exception encountered in repetition', this_run[0]+1
print inst
optim_queue.put([this_run[0], None])
runs_queue.task_done()
def optimize(rep, all_bins, filename, all_heights, spikes, output,
no_threads, mode):
'''initialize and start several optimizations'''
init_tree, data = utils.read_data(filename)
info_tree = [init_tree, data, all_bins, all_heights, spikes]
tree = FullTree(info_tree[0], info_tree[1], info_tree[2], info_tree[3],
info_tree[4])
no_pop = tree.no_pop
mutex = multiprocessing.Lock()
runs_queue = multiprocessing.JoinableQueue()
optim_queue = multiprocessing.Queue()
for i in range(no_threads):
p = multiprocessing.Process(target=threaded_optimize,
args=(mutex, runs_queue, optim_queue,
info_tree, ))
p.deamon = True
p.start()
# put the runs in the queue
param = tree.guess_param()
runs_queue.put([0, param])
for i in range(1, rep):
# generate random initial values around the guessed ones
init_param = [p + nprand.uniform(-p, p) for p in param]
runs_queue.put([i, init_param])
# put sentinel for each process
for i in range(no_threads):
runs_queue.put(None)
runs_queue.join()
# I am done, report results
report_results(rep, optim_queue, tree, output, mode)
def report_results(rep, optim_queue, tree, output, mode):
'''report results for all repetitions'''
best_lk = 1e307
best_rep = -1
optim = [None] * rep
for i in range(rep):
        aux = optim_queue.get()
        if aux[1] is None:
            print 'Repetition', aux[0]+1, 'terminated with an exception.'
        else:
            optim[aux[0]] = [aux[1], aux[2], aux[3], aux[4]]
            if optim[aux[0]][0].fun < best_lk:
                best_lk = optim[aux[0]][0].fun
                best_rep = aux[0]
if best_rep == -1:
print 'All repetitions terminated with exceptions. No results.'
else:
write_result(tree, optim, best_rep, best_lk, output, mode)
def write_result(tree, optim, best_rep, best_lk, output, mode):
'''write the result of optimization to file'''
f = open('%s.txt' % (output), mode=mode)
f.write('Overall best likelihood %6.15f found at repetition %d\n'
% (-best_lk, best_rep+1))
for i, opt in enumerate(optim):
if opt is not None:
f.write('\n------------- Repetition %d\n' % (i+1))
f.write(('Maximum log likelihood %6.15f found after %d iterations '
'and %d function evaluations\n')
% (-opt[0].fun, opt[2], opt[3]))
tree.update(opt[1])
f.write('Starting tree:\n')
utils.write_tree(f, tree, False)
f.write('Optimized tree:\n')
tree.update(opt[0].x)
utils.write_tree(f, tree)
f.write('\n')
f.close()
if __name__ == "__main__":
parser = OptionParser()
parser.add_option('-f', dest='file',
help='input file containing data and tree')
parser.add_option('-o', dest='output',
help='output file to write optimization result')
parser.add_option('-T', dest='height', type='int', default=30,
help='tree height [default: %default]')
parser.add_option('-K', dest='bins', type='int', default=20,
help='number of bins [default: %default]')
parser.add_option('-B', dest='beta', action='store_true', default=False,
help='run beta; otherwise, run beta with spikes [default: %default]')
parser.add_option('-r', dest='rep', type='int', default=1,
help='number of repetitions [default: %default]')
parser.add_option('-t', dest='threads', type='int', default=1,
help='number of threads to run [default: %default]')
(opt, args) = parser.parse_args()
if not opt.beta:
print 'Running optimization using beta with spikes'
opt.output += '_spikes'
else:
print 'Running optimization using beta'
if opt.threads > opt.rep:
opt.threads = opt.rep
        print 'Number of threads given is larger than the number of repetitions.'
print 'Setting number of threads to number of repetitions, ', opt.rep
optimize(opt.rep, opt.bins, opt.file, opt.height, not opt.beta,
opt.output, opt.threads, 'w')
| gpl-3.0 |
tablego00/stkrtp | Bot/LineBot/StoMonBot.py | 1 | 3081 | import sys, requests, json, time, datetime
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
from util.AppUtil import AppUtil
app = Flask(__name__)
appUtil = AppUtil()
STOCK_MIS = appUtil.getStockMIS()
RT_URL = appUtil.getStockRealTimePrice()
stockIDList = appUtil.getStockIDs().split(',')
# get channel_secret and channel_access_token from your environment variable
channel_secret = appUtil.getLineChannelSecret()
channel_access_token = appUtil.getLineChannelAccessToken()
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
handler = WebhookHandler(channel_secret)
@app.route("/callback", methods=['POST'])
def callback():
print('got callback')
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    defRtnStr = 'Don\'t call me, I will call you.'
reciveMsg = event.message.text
print('YYYYYYYYYYYYYYYY')
print(event.reply_token)
print(event.message.text)
print('xxxxxxxxxx')
matching = [s for s in stockIDList if reciveMsg in s]
if (len(matching) == 0):
defRtnStr = 'Don\'t have this stockId.'
else:
with requests.session() as req:
req.get(STOCK_MIS,
headers={'Accept-Language': 'zh-TW'})
timestamp = int(time.time()*1000+1000000)
print(RT_URL.format(timestamp,matching[0]))
res = req.get(RT_URL.format(timestamp,matching[0]))
jsonRtn = res.text.strip()
d = json.loads(jsonRtn)
datas = d['msgArray']
df = pd.DataFrame(columns=['c','n','z','t','tv'])
for data in datas :
try:
data = dict((key,data[key]) for key in ('c','n','z','t','tv'))
df = df.append(data,ignore_index=True)
except Exception as e:
print('something is null.')
print(df)
defRtnStr = str(df.loc[0,'c']) + ' ' + str(df.loc[0,'n']) + ' ' + str(df.loc[0,'z']) + ' ' + str(df.loc[0,'t']) + ' ' + str(df.loc[0,'tv'])
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=defRtnStr))
@app.route('/')
def index():
return "<p>Hello StoMonBot!</p>"
if __name__ == '__main__':
app.run(debug=True) | bsd-3-clause |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs_v3/F3_zOR_benchmark_v3.py | 1 | 5327 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 10/15/14
###Function: mean peak-based retro zOR metric vs. CDC benchmark index, mean Thanksgiving-based early zOR metric vs. CDC benchmark index.
# 10/14/14 OR age flip.
# 10/15 ILI incidence ratio
###Import data: /home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index.csv, SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop_age.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv
###Command Line: python F3_zOR_benchmark_v3.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
## local modules ##
import functions_v3 as fxn
### data structures ###
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
ixin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index.csv','r')
ixin.readline()
ix = csv.reader(ixin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
fs = 24
fssml = 16
### program ###
# import data
# d_benchmark[seasonnum] = CDC benchmark index value
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_benchmark = fxn.benchmark_import(ix, 8) # no ILINet
# dict_wk[wk] = seasonnum
# dict_incid53ls[s] = [incid rate per 100000 wk40,... incid rate per 100000 wk 39] (unadjusted ILI incidence)
# dict_OR53ls[s] = [OR wk 40,... OR wk 39] (children and adults adjusted for SDI data coverage and ILI-seeking behavior)
# dict_zOR53ls[s] = [zOR wk 40,... zOR wk 39] (children and adults adjusted for SDI data coverage and ILI-seeking behavior)
d_wk, d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_OR_processing(incid, pop)
# dict_indices[(snum, classif period)] = [wk index 1, wk index 2, etc.]
d_indices = fxn.identify_retro_early_weeks(d_wk, d_incid53ls)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# plot values
benchmark = [d_benchmark[s] for s in ps]
retrozOR = [d_classifzOR[s][0] for s in ps]
earlyzOR = [d_classifzOR[s][1] for s in ps]
print 'retro corr coef', np.corrcoef(benchmark, retrozOR)
print 'early corr coef', np.corrcoef(benchmark, earlyzOR)
# draw plots
fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
# mean retro zOR vs. benchmark index
ax1.plot(benchmark, retrozOR, marker = 'o', color = 'black', linestyle = 'None')
ax1.vlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax1.hlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax1.fill([-5, -1, -1, -5], [-1, -1, -15, -15], facecolor='blue', alpha=0.4)
ax1.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax1.fill([1, 5, 5, 1], [1, 1, 15, 15], facecolor='red', alpha=0.4)
ax1.annotate('Mild', xy=(-4.75,-14), fontsize=fssml)
ax1.annotate('Severe', xy=(1.25,13), fontsize=fssml)
for s, x, y in zip(sl, benchmark, retrozOR):
ax1.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax1.set_ylabel(fxn.gp_sigma_r, fontsize=fs)
ax1.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax1.tick_params(axis='both', labelsize=fssml)
ax1.set_xlim([-5,5])
ax1.set_ylim([-20,20])
# plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v2/F3/zOR_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
# plt.close()
# plt.show()
plt.savefig(fxn.filename_dummy2, transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
# mean early warning zOR vs. benchmark index
ax2.plot(benchmark, earlyzOR, marker = 'o', color = 'black', linestyle = 'None')
ax2.vlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax2.hlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax2.fill([-5, -1, -1, -5], [-1, -1, -15, -15], facecolor='blue', alpha=0.4)
ax2.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax2.fill([1, 5, 5, 1], [1, 1, 15, 15], facecolor='red', alpha=0.4)
ax2.annotate('Mild', xy=(-4.75,-9.5), fontsize=fssml)
ax2.annotate('Severe', xy=(1.25,8.5), fontsize=fssml)
for s, x, y in zip(sl, benchmark, earlyzOR):
ax2.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax2.set_ylabel(fxn.gp_sigma_w, fontsize=fs)
ax2.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax2.tick_params(axis='both', labelsize=fssml)
ax2.set_xlim([-5,5])
ax2.set_ylim([-10,15])
# plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v2/F3/zOR_benchmark_early.png', transparent=False, bbox_inches='tight', pad_inches=0)
# plt.close()
# plt.show()
plt.savefig(fxn.filename_dummy3, transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
| mit |
ahoyosid/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
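# The standard continuous Ricker form is
#   psi(t) = 2 / (sqrt(3*a) * pi**0.25) * (1 - (t/a)**2) * exp(-t**2 / (2*a**2));
# the overall constant is immaterial here because each atom of the dictionary is
# re-normalized to unit l2 norm in ricker_matrix below.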
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
jseabold/statsmodels | statsmodels/graphics/tests/test_tsaplots.py | 1 | 8314 | from statsmodels.compat.python import lmap
from io import BytesIO
import numpy as np
import pandas as pd
from numpy.testing import assert_equal, assert_
import pytest
import statsmodels.api as sm
from statsmodels.graphics.tsaplots import (
plot_acf,
plot_pacf,
month_plot,
quarter_plot,
seasonal_plot,
plot_predict,
)
import statsmodels.tsa.arima_process as tsp
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
try:
import matplotlib.pyplot as plt
except ImportError:
pass
@pytest.mark.matplotlib
def test_plot_acf(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_acf(acf, ax=ax, lags=10)
plot_acf(acf, ax=ax)
plot_acf(acf, ax=ax, alpha=None)
@pytest.mark.matplotlib
def test_plot_acf_irregular(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_acf(acf, ax=ax, lags=np.arange(1, 11))
plot_acf(acf, ax=ax, lags=10, zero=False)
plot_acf(acf, ax=ax, alpha=None, zero=False)
@pytest.mark.matplotlib
def test_plot_pacf(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_pacf(pacf, ax=ax)
plot_pacf(pacf, ax=ax, alpha=None)
@pytest.mark.matplotlib
def test_plot_pacf_kwargs(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
buff = BytesIO()
plot_pacf(pacf, ax=ax)
fig.savefig(buff, format="rgba")
buff_linestyle = BytesIO()
fig_linestyle = plt.figure()
ax = fig_linestyle.add_subplot(111)
plot_pacf(pacf, ax=ax, ls="-")
fig_linestyle.savefig(buff_linestyle, format="rgba")
buff_with_vlines = BytesIO()
fig_with_vlines = plt.figure()
ax = fig_with_vlines.add_subplot(111)
vlines_kwargs = {"linestyles": "dashdot"}
plot_pacf(pacf, ax=ax, vlines_kwargs=vlines_kwargs)
fig_with_vlines.savefig(buff_with_vlines, format="rgba")
buff.seek(0)
buff_linestyle.seek(0)
buff_with_vlines.seek(0)
plain = buff.read()
linestyle = buff_linestyle.read()
with_vlines = buff_with_vlines.read()
assert_(plain != linestyle)
assert_(with_vlines != plain)
assert_(linestyle != with_vlines)
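# Helper sketch (an addition, not part of the original test module) that
# captures the render-and-compare pattern used above: draw with the given
# plotting function, serialize the figure to raw RGBA bytes, and return them
# so two renderings can be compared byte-for-byte.
def _render_to_bytes(plot_func, data, **kwargs):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plot_func(data, ax=ax, **kwargs)
    buff = BytesIO()
    fig.savefig(buff, format="rgba")
    buff.seek(0)
    return buff.read()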
@pytest.mark.matplotlib
def test_plot_acf_kwargs(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
buff = BytesIO()
plot_acf(acf, ax=ax)
fig.savefig(buff, format="rgba")
buff_with_vlines = BytesIO()
fig_with_vlines = plt.figure()
ax = fig_with_vlines.add_subplot(111)
vlines_kwargs = {"linestyles": "dashdot"}
plot_acf(acf, ax=ax, vlines_kwargs=vlines_kwargs)
fig_with_vlines.savefig(buff_with_vlines, format="rgba")
buff.seek(0)
buff_with_vlines.seek(0)
plain = buff.read()
with_vlines = buff_with_vlines.read()
assert_(with_vlines != plain)
@pytest.mark.matplotlib
def test_plot_acf_missing(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
acf[::13] = np.nan
buff = BytesIO()
plot_acf(acf, ax=ax, missing="drop")
fig.savefig(buff, format="rgba")
buff.seek(0)
fig = plt.figure()
ax = fig.add_subplot(111)
buff_conservative = BytesIO()
plot_acf(acf, ax=ax, missing="conservative")
fig.savefig(buff_conservative, format="rgba")
buff_conservative.seek(0)
assert_(buff.read() != buff_conservative.read())
@pytest.mark.matplotlib
def test_plot_pacf_irregular(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_pacf(pacf, ax=ax, lags=np.arange(1, 11))
plot_pacf(pacf, ax=ax, lags=10, zero=False)
plot_pacf(pacf, ax=ax, alpha=None, zero=False)
@pytest.mark.matplotlib
def test_plot_month(close_figures):
dta = sm.datasets.elnino.load_pandas().data
dta["YEAR"] = dta.YEAR.astype(int).apply(str)
dta = dta.set_index("YEAR").T.unstack()
dates = pd.to_datetime(["-".join([x[1], x[0]]) for x in dta.index.values])
# test dates argument
fig = month_plot(dta.values, dates=dates, ylabel="el nino")
# test with a TimeSeries DatetimeIndex with no freq
dta.index = pd.DatetimeIndex(dates)
fig = month_plot(dta)
# w freq
dta.index = pd.DatetimeIndex(dates, freq="MS")
fig = month_plot(dta)
# test with a TimeSeries PeriodIndex
dta.index = pd.PeriodIndex(dates, freq="M")
fig = month_plot(dta)
@pytest.mark.matplotlib
def test_plot_quarter(close_figures):
dta = sm.datasets.macrodata.load_pandas().data
dates = lmap(
"Q".join,
zip(
dta.year.astype(int).apply(str), dta.quarter.astype(int).apply(str)
),
)
# test dates argument
quarter_plot(dta.unemp.values, dates)
# test with a DatetimeIndex with no freq
dta.set_index(pd.to_datetime(dates), inplace=True)
quarter_plot(dta.unemp)
# w freq
# see pandas #6631
dta.index = pd.DatetimeIndex(pd.to_datetime(dates), freq="QS-Oct")
quarter_plot(dta.unemp)
# w PeriodIndex
dta.index = pd.PeriodIndex(pd.to_datetime(dates), freq="Q")
quarter_plot(dta.unemp)
@pytest.mark.matplotlib
def test_seasonal_plot(close_figures):
rs = np.random.RandomState(1234)
data = rs.randn(20, 12)
data += 6 * np.sin(np.arange(12.0) / 11 * np.pi)[None, :]
data = data.ravel()
months = np.tile(np.arange(1, 13), (20, 1))
months = months.ravel()
df = pd.DataFrame([data, months], index=["data", "months"]).T
grouped = df.groupby("months")["data"]
labels = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
fig = seasonal_plot(grouped, labels)
ax = fig.get_axes()[0]
output = [tl.get_text() for tl in ax.get_xticklabels()]
assert_equal(labels, output)
@pytest.mark.matplotlib
@pytest.mark.parametrize(
"model_and_args",
[(AutoReg, dict(lags=2, old_names=False)), (ARIMA, dict(order=(2, 0, 0)))],
)
@pytest.mark.parametrize("use_pandas", [True, False])
@pytest.mark.parametrize("alpha", [None, 0.10])
def test_predict_plot(use_pandas, model_and_args, alpha):
model, kwargs = model_and_args
rs = np.random.RandomState(0)
y = rs.standard_normal(1000)
for i in range(2, 1000):
y[i] += 1.8 * y[i - 1] - 0.9 * y[i - 2]
y = y[100:]
if use_pandas:
index = pd.date_range("1960-1-1", freq="M", periods=y.shape[0] + 24)
start = index[index.shape[0] // 2]
end = index[-1]
y = pd.Series(y, index=index[:-24])
else:
start = y.shape[0] // 2
end = y.shape[0] + 24
res = model(y, **kwargs).fit()
fig = plot_predict(res, start, end, alpha=alpha)
assert isinstance(fig, plt.Figure)
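# Usage sketch (illustrative only; synthetic data and arbitrary settings) of
# plot_predict outside the pytest harness, mirroring test_predict_plot above.
def _plot_predict_sketch():
    rs = np.random.RandomState(0)
    y = rs.standard_normal(250)
    res = ARIMA(y, order=(1, 0, 0)).fit()
    # Forecast 20 steps beyond the sample and draw a 90% interval.
    return plot_predict(res, start=200, end=y.shape[0] + 20, alpha=0.10)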
| bsd-3-clause |
panoptes/POCS | docs/conf.py | 1 | 9823 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
import shutil
import sys
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/panoptes")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
from pkg_resources import parse_version
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if parse_version(sphinx.__version__) >= parse_version('1.7'):
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'matplotlib.sphinxext.plot_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst']
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'POCS'
copyright = u'2020, Project PANOPTES'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# 'sidebar_width': '300px',
# 'page_width': '1200px'
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from panoptes.pocs import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
html_logo = '_static/pan-title-black-transparent.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pocs-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'POCS Documentation',
u'Project PANOPTES', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('https://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('https://scikit-learn.org/stable', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'astropy': ('https://docs.astropy.org/en/stable/', None),
'astroplan': ('https://astroplan.readthedocs.io/en/latest/', None),
'panoptes.utils': ('https://panoptes-utils.readthedocs.io/en/latest/', None),
}
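# Illustrative note (roles below are examples, not taken from these docs): with
# the mapping above, reST cross-references such as :class:`numpy.ndarray` or
# :mod:`astropy.io.fits` resolve against the listed inventories at build time.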
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
def skip(app, what, name, obj, would_skip, options):
if name == "__init__":
return False
return would_skip
def setup(app):
app.connect("autodoc-skip-member", skip)
| mit |
miykael/BrainsForPublication | scripts/subj_anat_gif.py | 3 | 10806 | #!/usr/bin/env python
#=============================================================================
# Created by Kirstie Whitaker
# at OHBM 2016 Brainhack in Lausanne, June 2016
# Contact: [email protected]
#=============================================================================
#=============================================================================
# IMPORTS
#=============================================================================
import os
import sys
import argparse
import textwrap
from glob import glob
import numpy as np
import nibabel as nib
from nilearn import plotting
from nilearn.plotting import cm
from nilearn.image import reorder_img
from nilearn.image.resampling import coord_transform
import imageio
#=============================================================================
# FUNCTIONS
#=============================================================================
def setup_argparser():
'''
Code to read in arguments from the command line
Also allows you to change some settings
'''
# Build a basic parser.
    help_text = ('Plot an anatomical volume in subject space,\noptionally overlay another image in the same space,\nand make the slices into a gif')
sign_off = 'Author: Kirstie Whitaker <[email protected]>'
parser = argparse.ArgumentParser(description=help_text,
epilog=sign_off,
formatter_class=argparse.RawTextHelpFormatter)
# Now add the arguments
parser.add_argument(dest='anat_file',
type=str,
metavar='anat_file',
help='Nifti or mgz file in subject space that you want to visualise')
parser.add_argument('-o', '--overlay_file',
type=str,
metavar='overlay_file',
help='Nifti or mgz file in subject space that you want to overlay',
default=None)
    parser.add_argument('-a', '--axis',
type=str,
metavar='axis',
help=textwrap.dedent("The axis you'd like to project.\nOptions are:\n x: sagittal\n y: coronal\n z: axial\n\nDefault: ortho"),
default='x')
parser.add_argument('-c', '--cmap',
type=str,
metavar='cmap',
help=textwrap.dedent('Any matplotlib colormap listed at\n http://matplotlib.org/examples/color/colormaps_reference.html\nDefault: gray'),
default='gray')
parser.add_argument('-oc', '--overlay_cmap',
type=str,
metavar='overlay_cmap',
help=textwrap.dedent('Any matplotlib colormap listed at\n http://matplotlib.org/examples/color/colormaps_reference.html\nDefault: prism'),
default='prism')
parser.add_argument('--black_bg',
action='store_true',
help=textwrap.dedent('Set the background to black.\nDefault: White'),
default=False)
parser.add_argument('--annotate',
action='store_true',
help=textwrap.dedent('Add L and R labels to images.\nDefault: False'),
default=False)
parser.add_argument('-t', '--thr',
type=float,
metavar='thr',
help=textwrap.dedent('Mask the input image such that all values\n which have an absolute value less than this threshold\n are not shown.\nIf None then no thresholding is undertaken.\nDefault: None'),
default=None)
parser.add_argument('--dpi',
type=float,
metavar='dpi',
help='DPI of output png file\nDefault: 300',
default=300)
arguments = parser.parse_args()
return arguments, parser
#=============================================================================
# SET SOME VARIABLES
#=============================================================================
# Read in the arguments from argparse
arguments, parser = setup_argparser()
anat_file = arguments.anat_file
overlay_file = arguments.overlay_file
axis = arguments.axis
cmap = arguments.cmap
overlay_cmap = arguments.overlay_cmap
threshold = arguments.thr
black_bg = arguments.black_bg
annotate = arguments.annotate
dpi = arguments.dpi
# Set a couple of hard coded options
draw_cross = False
#===============================================================================
# Make a bunch of dictionaries that allow you to loop through x, y and z
#===============================================================================
# The x, y, z coord_transform dictionary contains keys that
# are either 'x', 'y', 'z' and values that are functions to
# convert that axis to aligned space.
def coord_transform_x(x, img):
x, y, z = coord_transform(x, 0, 0, img.affine)
return x
def coord_transform_y(y, img):
x, y, z = coord_transform(0, y, 0, img.affine)
return y
def coord_transform_z(z, img):
x, y, z = coord_transform(0, 0, z, img.affine)
return z
coord_transform_dict = { 'x' : coord_transform_x,
'y' : coord_transform_y,
'z' : coord_transform_z }
# The x, y, z slice dictionary contains keys that
# are either 'x', 'y', 'z' and values that are functions to
# return the slice through a specific coordinate.
def slice_x(x, img):
s = img.get_data()[x, :, :]
return s
def slice_y(y, img):
s = img.get_data()[:, y, :]
return s
def slice_z(z, img):
s = img.get_data()[:, :, z]
return s
slice_dict = { 'x' : slice_x,
'y' : slice_y,
'z' : slice_z }
# The x, y, z dim lookup dictionary contains keys that
# are either 'x', 'y', 'z' and values that correspond to
# the axis of the image.
# For example, 'x' is the first axis of the image: 0
dim_lookup_dict = { 'x' : 0,
'y' : 1,
'z' : 2 }
# The x, y, z label lookup dictionary contains keys that
# are either 'x', 'y', 'z' and values that correspond to
# the name of the projection.
label_lookup_dict = { 'x' : 'sagittal',
'y' : 'coronal',
'z' : 'axial' }
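# Worked illustration (hypothetical value k): for axis == 'z', dim_lookup_dict
# selects image axis 2, slice_dict returns img.get_data()[:, :, k], and
# label_lookup_dict names the outputs 'axial'.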
#===============================================================================
# Get the colormap from nilearn
#===============================================================================
if hasattr(cm, cmap):
cmap = getattr(cm, cmap)
#===============================================================================
# Set up the output directory & gif file name
#===============================================================================
# First figure out the name of the overlay file
if overlay_file is None:
overlay_name = ''
elif '.mgz' in overlay_file:
overlay_name = '_' + os.path.basename(overlay_file).rsplit('.mgz', 1)[0]
else:
overlay_name = '_' + os.path.basename(overlay_file).rsplit('.nii', 1)[0]
# Add that overlay string to the pngs folder and gif file
if '.mgz' in anat_file:
pngs_dir = anat_file.rsplit('.mgz', 1)[0] + '_PNGS'
gif_file = (anat_file.rsplit('.mgz', 1)[0]
+ overlay_name
+ '_{}.gif'.format(label_lookup_dict[axis]))
else:
pngs_dir = anat_file.rsplit('.nii', 1)[0] + '_PNGS'
gif_file = (anat_file.rsplit('.nii', 1)[0]
+ overlay_name
+ '_{}.gif'.format(label_lookup_dict[axis]))
if not os.path.isdir(pngs_dir):
os.makedirs(pngs_dir)
#===============================================================================
# Read in the images using nibabel
#===============================================================================
img = nib.load(anat_file)
# Convert the data to float
data = img.get_data()
data = data.astype('float')
# Reslice the image so there are no rotations in the affine.
# This step is actually included in the nilearn plot_anat command below
# but it runs faster if the image has already been resliced.
img_reslice = reorder_img(img, resample='continuous')
# Do the same if you have an overlay file too
if not overlay_file is None:
overlay_img = nib.load(overlay_file)
data = overlay_img.get_data()
data = data.astype('float')
overlay_img_reslice = reorder_img(overlay_img, resample='nearest')
#===============================================================================
# Plot each slice unless it's empty!
#===============================================================================
# Loop through all the slices in this dimension
for i in np.arange(img_reslice.shape[dim_lookup_dict[axis]], dtype='float'):
# Test to see if there is anything worth showing in the image
# If the anat image (img) is empty then don't make a picture
    if slice_dict[axis](int(i), img).mean() == 0.0:
continue
# Get the co-ordinate you want
coord = coord_transform_dict[axis](i, img_reslice)
# Make the image
slicer = plotting.plot_anat(img_reslice,
threshold=None,
cmap=cmap,
display_mode=axis,
black_bg=black_bg,
annotate=annotate,
draw_cross=draw_cross,
cut_coords=(coord,))
# Add the overlay if given
if not overlay_file is None:
slicer.add_overlay(overlay_img_reslice, cmap=overlay_cmap)
# Save the png file
output_file = os.path.join(pngs_dir,
'{}_{:03.0f}{}.png'.format(label_lookup_dict[axis],
i,
overlay_name))
slicer.savefig(output_file, dpi=dpi)
slicer.close()
#===============================================================================
# Now make a gif!
#===============================================================================
png_list = glob(os.path.join(pngs_dir,
'{}*{}.png'.format(label_lookup_dict[axis], overlay_name)))
png_list.sort()
with imageio.get_writer(gif_file, mode='I') as writer:
for fname in png_list:
image = imageio.imread(fname)
writer.append_data(image)
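# Optional tweak (plugin-dependent, so treat this as an assumption): many
# imageio GIF writers accept a per-frame duration, e.g.
#   imageio.get_writer(gif_file, mode='I', duration=0.2)
# to slow the animation down.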
#===============================================================================
# WAY TO GO!
#===============================================================================
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/sparse/tests/test_indexing.py | 7 | 38977 | # pylint: disable-msg=E1101,W0612
import nose # noqa
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseSeriesIndexing(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
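        # For reference: to_sparse() keeps only the non-NaN entries, so
        # self.sparse stores sp_values [1.0, 3.0] at positions [0, 3] with a
        # NaN fill_value.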
def test_getitem(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse[0], 1)
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[3], 3)
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
self.assertEqual(res.dtype, np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
self.assertEqual(res.dtype, np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse[0], 1)
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[2], 0)
self.assertEqual(sparse[3], 3)
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.loc[0], 1)
self.assertTrue(np.isnan(sparse.loc[1]))
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
self.assertTrue(np.isnan(result[-1]))
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['A'], 1)
self.assertTrue(np.isnan(sparse.loc['B']))
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.loc['A'], 1)
self.assertTrue(np.isnan(sparse.loc['B']))
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.iloc[3], 3)
self.assertTrue(np.isnan(sparse.iloc[2]))
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with tm.assertRaises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iloc[3], 3)
self.assertTrue(np.isnan(sparse.iloc[1]))
self.assertEqual(sparse.iloc[4], 0)
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
self.assertEqual(sparse.at[0], orig.at[0])
self.assertTrue(np.isnan(sparse.at[1]))
self.assertTrue(np.isnan(sparse.at[2]))
self.assertEqual(sparse.at[3], orig.at[3])
self.assertTrue(np.isnan(sparse.at[4]))
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['a'], orig.at['a'])
self.assertTrue(np.isnan(sparse.at['b']))
self.assertTrue(np.isnan(sparse.at['c']))
self.assertEqual(sparse.at['d'], orig.at['d'])
self.assertTrue(np.isnan(sparse.at['e']))
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['a'], orig.at['a'])
self.assertTrue(np.isnan(sparse.at['b']))
self.assertEqual(sparse.at['c'], orig.at['c'])
self.assertEqual(sparse.at['d'], orig.at['d'])
self.assertEqual(sparse.at['e'], orig.at['e'])
def test_iat(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.iat[0], orig.iat[0])
self.assertTrue(np.isnan(sparse.iat[1]))
self.assertTrue(np.isnan(sparse.iat[2]))
self.assertEqual(sparse.iat[3], orig.iat[3])
self.assertTrue(np.isnan(sparse.iat[4]))
self.assertTrue(np.isnan(sparse.iat[-1]))
self.assertEqual(sparse.iat[-5], orig.iat[-5])
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0], orig.iat[0])
self.assertTrue(np.isnan(sparse.iat[1]))
self.assertEqual(sparse.iat[2], orig.iat[2])
self.assertEqual(sparse.iat[3], orig.iat[3])
self.assertEqual(sparse.iat[4], orig.iat[4])
self.assertEqual(sparse.iat[-1], orig.iat[-1])
self.assertEqual(sparse.iat[-5], orig.iat[-5])
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
self.assertEqual(s.get(0), 1)
self.assertTrue(np.isnan(s.get(1)))
self.assertIsNone(s.get(5))
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
self.assertEqual(s.get('A'), 1)
self.assertTrue(np.isnan(s.get('B')))
self.assertEqual(s.get('C'), 0)
self.assertIsNone(s.get('XX'))
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
self.assertEqual(s.get('A'), 1)
self.assertTrue(np.isnan(s.get('B')))
self.assertEqual(s.get('C'), 0)
self.assertIsNone(s.get('XX'))
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill,
dtype=bool)
tm.assert_sp_array_equal(pd.SparseArray([1, 3], kind=kind),
arr[indexer])
s = pd.SparseSeries(arr, index=['a', 'b', 'c'],
dtype=np.float64)
exp = pd.SparseSeries([1, 3], index=['a', 'c'],
dtype=np.float64, kind=kind)
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
tm.assert_sp_series_equal(s.iloc[indexer], exp)
indexer = pd.SparseSeries(indexer, index=['a', 'b', 'c'])
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
msg = ("iLocation based boolean indexing cannot use an "
"indexable as a mask")
with tm.assertRaisesRegexp(ValueError, msg):
s.iloc[indexer]
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
_multiprocess_can_split_ = True
def setUp(self):
        # MultiIndex with duplicated values
idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
('C', 0), ('C', 1)])
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
self.sparse = self.orig.to_sparse()
def test_getitem_multi(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse[0], orig[0])
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[3], orig[3])
tm.assert_sp_series_equal(sparse['A'], orig['A'].to_sparse())
tm.assert_sp_series_equal(sparse['B'], orig['B'].to_sparse())
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_multi_tuple(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse['C', 0], orig['C', 0])
self.assertTrue(np.isnan(sparse['A', 1]))
self.assertTrue(np.isnan(sparse['B', 0]))
def test_getitems_slice_multi(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
def test_loc(self):
# need to be override to use different label
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A'],
orig.loc['A'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B'],
orig.loc['B'].to_sparse())
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_multi_tuple(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.loc['C', 0], orig.loc['C', 0])
self.assertTrue(np.isnan(sparse.loc['A', 1]))
self.assertTrue(np.isnan(sparse.loc['B', 0]))
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A':], orig.loc['A':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
class TestSparseDataFrameIndexing(tm.TestCase):
_multiprocess_can_split_ = True
def test_getitem(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse['x'], orig['x'].to_sparse())
tm.assert_sp_frame_equal(sparse[['x']], orig[['x']].to_sparse())
tm.assert_sp_frame_equal(sparse[['z', 'x']],
orig[['z', 'x']].to_sparse())
tm.assert_sp_frame_equal(sparse[[True, False, True, True]],
orig[[True, False, True, True]].to_sparse())
tm.assert_sp_frame_equal(sparse[[1, 2]],
orig[[1, 2]].to_sparse())
def test_getitem_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse['y'],
orig['y'].to_sparse(fill_value=0))
exp = orig[['x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['x']], exp)
exp = orig[['z', 'x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['z', 'x']], exp)
indexer = [True, False, True, True]
exp = orig[indexer].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[indexer], exp)
exp = orig[[1, 2]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[[1, 2]], exp)
def test_loc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc[0, 'x'], 1)
self.assertTrue(np.isnan(sparse.loc[1, 'z']))
self.assertEqual(sparse.loc[2, 'z'], 4)
tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse())
tm.assert_sp_series_equal(sparse.loc[1], orig.loc[1].to_sparse())
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
result = sparse.loc[[1, 2]]
exp = orig.loc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[1, 2], :]
exp = orig.loc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[0, 2], ['x', 'z']]
exp = orig.loc[[0, 2], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_index(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
index=list('abc'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['a', 'x'], 1)
self.assertTrue(np.isnan(sparse.loc['b', 'z']))
self.assertEqual(sparse.loc['c', 'z'], 4)
tm.assert_sp_series_equal(sparse.loc['a'], orig.loc['a'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b'], orig.loc['b'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
result = sparse.loc[['a', 'b']]
exp = orig.loc[['a', 'b']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['a', 'b'], :]
exp = orig.loc[['a', 'b'], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['c', 'a'], ['x', 'z']]
exp = orig.loc[['c', 'a'], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_iloc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]])
sparse = orig.to_sparse()
self.assertEqual(sparse.iloc[1, 1], 3)
self.assertTrue(np.isnan(sparse.iloc[2, 0]))
tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
result = sparse.iloc[[1, 2]]
exp = orig.iloc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[1, 2], :]
exp = orig.iloc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[:, [1, 0]]
exp = orig.iloc[:, [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[2], [1, 0]]
exp = orig.iloc[[2], [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
with tm.assertRaises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_at(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
self.assertTrue(np.isnan(sparse.at['B', 'z']))
self.assertTrue(np.isnan(sparse.at['C', 'y']))
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_at_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
self.assertTrue(np.isnan(sparse.at['B', 'z']))
self.assertTrue(np.isnan(sparse.at['C', 'y']))
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_iat(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
self.assertTrue(np.isnan(sparse.iat[1, 2]))
self.assertTrue(np.isnan(sparse.iat[2, 1]))
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
self.assertTrue(np.isnan(sparse.iat[-1, -2]))
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_iat_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
self.assertTrue(np.isnan(sparse.iat[1, 2]))
self.assertTrue(np.isnan(sparse.iat[2, 1]))
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
self.assertTrue(np.isnan(sparse.iat[-1, -2]))
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_take(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([0, 1]),
orig.take([0, 1]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
exp = orig.take([0]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0]), exp)
exp = orig.take([0, 1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0, 1]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all missing
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all fill_value
orig = pd.DataFrame([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
class TestMultitype(tm.TestCase):
def setUp(self):
self.cols = ['string', 'int', 'float', 'object']
self.string_series = pd.SparseSeries(['a', 'b', 'c'])
self.int_series = pd.SparseSeries([1, 2, 3])
self.float_series = pd.SparseSeries([1.1, 1.2, 1.3])
self.object_series = pd.SparseSeries([[], {}, set()])
self.sdf = pd.SparseDataFrame({
'string': self.string_series,
'int': self.int_series,
'float': self.float_series,
'object': self.object_series,
})
self.sdf = self.sdf[self.cols]
self.ss = pd.SparseSeries(['a', 1, 1.1, []], index=self.cols)
def test_frame_basic_dtypes(self):
for _, row in self.sdf.iterrows():
self.assertEqual(row.dtype, object)
tm.assert_sp_series_equal(self.sdf['string'], self.string_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['int'], self.int_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['float'], self.float_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['object'], self.object_series,
check_names=False)
def test_frame_indexing_single(self):
tm.assert_sp_series_equal(self.sdf.iloc[0],
pd.SparseSeries(['a', 1, 1.1, []],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[1],
pd.SparseSeries(['b', 2, 1.2, {}],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[2],
pd.SparseSeries(['c', 3, 1.3, set()],
index=self.cols),
check_names=False)
def test_frame_indexing_multiple(self):
tm.assert_sp_frame_equal(self.sdf, self.sdf[:])
tm.assert_sp_frame_equal(self.sdf, self.sdf.loc[:])
tm.assert_sp_frame_equal(self.sdf.iloc[[1, 2]],
pd.SparseDataFrame({
'string': self.string_series.iloc[[1, 2]],
'int': self.int_series.iloc[[1, 2]],
'float': self.float_series.iloc[[1, 2]],
'object': self.object_series.iloc[[1, 2]]
}, index=[1, 2])[self.cols])
tm.assert_sp_frame_equal(self.sdf[['int', 'string']],
pd.SparseDataFrame({
'int': self.int_series,
'string': self.string_series,
}))
def test_series_indexing_single(self):
for i, idx in enumerate(self.cols):
self.assertEqual(self.ss.iloc[i], self.ss[idx])
self.assertEqual(type(self.ss.iloc[i]),
type(self.ss[idx]))
self.assertEqual(self.ss['string'], 'a')
self.assertEqual(self.ss['int'], 1)
self.assertEqual(self.ss['float'], 1.1)
self.assertEqual(self.ss['object'], [])
def test_series_indexing_multiple(self):
tm.assert_sp_series_equal(self.ss.loc[['string', 'int']],
pd.SparseSeries(['a', 1],
index=['string', 'int']))
tm.assert_sp_series_equal(self.ss.loc[['string', 'object']],
pd.SparseSeries(['a', []],
index=['string', 'object']))
| gpl-3.0 |
jongha/stock-ai | modules/venders/itooza.py | 1 | 4393 | #-*- coding: utf-8 -*-
import os
import re
import pandas as pd
from modules.venders.vender import Vender
class Itooza(Vender):
URL = 'http://search.itooza.com/index.htm?seName=%s'
def __init__(self, code, vender=None):
Vender.__init__(self, self.URL, vender)
response = self.load_url(code)
html, soup = response['html'], response['soup']
price_contents = soup.find(
'div', class_='item-detail').find('span').contents
price = ''.join(re.findall('\d+', price_contents[0])) if len(
price_contents) > 0 else 0
title = soup.find('div', class_='item-head').find('h1').contents[0].strip()
tables = soup.find_all('table', limit=4)
simple = self.get_data_simple(tables)
summary = self.get_data_summary(tables)
self.set_tables(tables)
self.set_json('PER', simple['PER'][0])
self.set_json('PBR', simple['PBR'][0])
self.set_json('ROE',
self.str_to_percent(simple['ROE = ROS * S/A * A/E'][0]))
self.set_json('EPS', simple['EPS'][0])
self.set_json('BPS', simple['BPS'][0])
self.set_json('DPS', simple['DPS'][0])
self.set_json('PER_5', summary['PER_5'][0])
self.set_json('PBR_5', summary['PBR_5'][0])
self.set_json('ROE_5', self.str_to_percent(summary['ROE_5'][0]))
self.set_json('EPS_5_GROWTH',
self.str_to_percent(summary['EPS_5_GROWTH'][0]))
self.set_json('BPS_5_GROWTH',
self.str_to_percent(summary['BPS_5_GROWTH'][0]))
def str_to_percent(self, value):
if not self.isNaN(value):
return float(value.split('%')[0]) / 100
else:
return value
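  # e.g. str_to_percent('12.3%') -> 0.123; NaN inputs are returned unchanged.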
def concat(self, column_name, value):
if value:
df = pd.DataFrame(columns=[column_name], index=self.data.index.values)
df[column_name][self.data.index[0]] = value
self.data = pd.concat(
[self.data, df], axis=1, join_axes=[self.data.index])
# return {
# 'price': price,
# 'title': title,
# 'eps': simple['EPS'].mean(),
# 'bps': simple['BPS'].mean(),
# 'per_5': summary['PER_5'][0],
# 'pbr_5': summary['PBR_5'][0],
# 'roe_5': summary['ROE_5'][0],
# 'eps_5_growth': summary['EPS_5_GROWTH'][0],
# 'bps_5_growth': summary['BPS_5_GROWTH'][0],
# 'roe_5_mean': data['ROE'].dropna()[:5].mean(),
# 'ros_5_mean': data['ROS'].dropna()[:5].mean(),
# 'simple': simple,
# 'summary': summary,
# 'data': data,
# 'html': html,
# }
def get_data_simple(self, tables):
df = pd.read_html(str(tables[0]), header=0)[0]
return df
def get_data_summary(self, tables):
df = pd.read_html(str(tables[1]), header=0)[0]
df.columns = ['PER_5', 'PBR_5', 'ROE_5', 'EPS_5_GROWTH', 'BPS_5_GROWTH']
return df
def set_tables(self, tables):
if len(tables) >= 4:
df = pd.read_html(str(tables[3]), header=0)[0]
columns = []
for index in range(len(df.columns)):
converted_column = self.date_column(df.columns[index])
if converted_column in columns:
converted_column += '.' + str(columns.count(converted_column))
columns.append(converted_column)
df.columns = columns
if len(df['MONTH'].dropna()) > 0:
for index in range(len(df['MONTH'])):
df.loc[index, ('MONTH')] = self.column_name(df['MONTH'][index])
df = df.transpose()
df.columns = df.iloc[0]
df = df.reindex(df.index.drop('MONTH'))
self.concat_data(df)
def date_column(self, data):
if not self.isNaN(data):
data = data.replace('월', '').replace('.', '-')
if bool(re.match('\d{2}-\d{2}', data)):
data = '20' + data[0:2]
elif bool(re.match('--\d{1,2}', data)):
data = self.id_generator()
else:
data = 'MONTH'
else:
data = self.id_generator()
return data
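  # Mechanically: a non-NaN header such as '18.12월' (assumed source format)
  # becomes '18-12' after the replacements and is then mapped to '2018';
  # NaN headers receive a generated id instead.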
def column_name(self, data):
name = {
'주당순이익(EPS,연결지배)': 'EPS_IFRS',
'주당순이익(EPS,개별)': 'EPS',
'PER (배)': 'PER',
'주당순자산(지분법)': 'BPS',
'PBR (배)': 'PBR',
'주당 배당금': 'DIVIDEND_PRICE',
'시가 배당률 (%)': 'DIVIDEND_RATE',
'ROE (%)': 'ROE',
'순이익률 (%)': 'ROS',
'영업이익률 (%)': 'OPM',
}
if data and data in name:
return name[data]
return data
| mit |
hainn8x/gnuradio | gr-digital/examples/example_timing.py | 49 | 9180 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
from scipy import fftpack
class example_timing(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise,
foffset, toffset, poffset, mode=0):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
gain = bw
nfilts = 32
rrc_taps_rx = filter.firdes.root_raised_cosine(
nfilts, sps*nfilts, 1.0, rolloff, ntaps*nfilts)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.off = filter.fractional_resampler_cc(0.20, 1.0)
if mode == 0:
self.clk = digital.pfb_clock_sync_ccf(sps, gain, rrc_taps_rx,
nfilts, nfilts//2, 1)
self.taps = self.clk.taps()
self.dtaps = self.clk.diff_taps()
self.delay = int(scipy.ceil(((len(rrc_taps)-1)/2 +
(len(self.taps[0])-1)/2)/float(sps))) + 1
self.vsnk_err = blocks.vector_sink_f()
self.vsnk_rat = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.connect((self.clk,2), self.vsnk_rat)
self.connect((self.clk,3), self.vsnk_phs)
else: # mode == 1
mu = 0.5
gain_mu = bw
gain_omega = 0.25*gain_mu*gain_mu
omega_rel_lim = 0.02
self.clk = digital.clock_recovery_mm_cc(sps, gain_omega,
mu, gain_mu,
omega_rel_lim)
self.vsnk_err = blocks.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_clk = blocks.vector_sink_c()
self.connect(self.src, self.rrc, self.chn, self.off, self.clk, self.vsnk_clk)
self.connect(self.src, self.vsnk_src)
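    # Illustrative usage sketch (added, not part of the original example): the
    # flowgraph can also be driven directly, with values mirroring the option
    # defaults in main() below,
    #   tb = example_timing(N=500, sps=4, rolloff=0.35, ntaps=45,
    #                       bw=2*scipy.pi/100.0, noise=0.0,
    #                       foffset=0.0, toffset=1.0, poffset=0.0, mode=0)
    #   tb.run()
    #   timing_error = tb.vsnk_err.data()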
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth (PFB) or gain (M&M) [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
help="Set the simulation's phase offset [default=%default]")
parser.add_option("-M", "--mode", type="int", default=0,
help="Set the recovery mode (0: polyphase, 1: M&M) [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_timing(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset,
options.mode)
put.run()
if options.mode == 0:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
data_rat = scipy.array(put.vsnk_rat.data()[20:])
data_phs = scipy.array(put.vsnk_phs.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "bo")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
delay = put.delay
m = len(data_clk.real)
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "bs", markersize=10, label="Input")
s2.plot(data_clk.real[delay:], "ro", label="Recovered")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
s2.legend()
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err, label="Error")
s3.plot(data_rat, 'r', label="Update rate")
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
s3.set_ylim([-0.5, 0.5])
s3.legend()
# Plot the clock recovery loop's error
s4 = f1.add_subplot(2,2,4)
s4.plot(data_phs)
s4.set_title("Clock Recovery Loop Filter Phase")
s4.set_xlabel("Samples")
s4.set_ylabel("Filter Phase")
diff_taps = put.dtaps
ntaps = len(diff_taps[0])
nfilts = len(diff_taps)
t = scipy.arange(0, ntaps*nfilts)
f3 = pylab.figure(3, figsize=(12,10), facecolor='w')
s31 = f3.add_subplot(2,1,1)
s32 = f3.add_subplot(2,1,2)
s31.set_title("Differential Filters")
s32.set_title("FFT of Differential Filters")
for i,d in enumerate(diff_taps):
D = 20.0*scipy.log10(1e-20+abs(fftpack.fftshift(fftpack.fft(d, 10000))))
s31.plot(t[i::nfilts].real, d, "-o")
s32.plot(D)
s32.set_ylim([-120, 10])
# If testing the M&M clock recovery loop
else:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "o")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "bs", markersize=10, label="Input")
s2.plot(data_clk.real, "ro", label="Recovered")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
s2.legend()
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err)
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/ensemble/tests/test_weight_boosting.py | 3 | 14926 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_classification_toy():
"""Check classification on a toy dataset."""
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
"""Check classification on a toy dataset."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
"""Check consistency on dataset iris."""
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
"""Check consistency on dataset boston house prices."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
"""Check staged predictions."""
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
"""Check that base trees can be grid-searched."""
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
"""Check pickability."""
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
"""Test that it gives proper exception on deficient input."""
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
"""Test different base estimators."""
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
def test_sparse_classification():
"""Check classification with sparse input."""
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
"""Check regression with sparse input."""
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(probability=True),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
            base_estimator=CustomSVR(probability=True),
            random_state=1
        ).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
n1ywb/wx | main.py | 1 | 6787 | #!/usr/bin/env python
"""
This program analyzes data from NOAA weather stations.
Copyright 2012 Jeff Laughlin Consulting LLC, All Rights Reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from collections import namedtuple
import datetime
import logging
import os
import os.path
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import sys
import gzip
import requests
import requests_cache
import pandas
import numpy as np
import matplotlib.pyplot as plt
CACHE_DIR = os.path.join(os.environ['HOME'], '.wx')
ROOT = 'http://www1.ncdc.noaa.gov/pub/data/gsod'
ISH_HISTORY = 'ish-history.txt'
StationHistory = namedtuple('StationHistory',
('usaf', 'wban', 'name', 'country', 'state', 'lat', 'lon', 'el', 'begin', 'end'))
def parse_history_line(line):
"""Parse a line of station history data and return a StationHistory object."""
try: usaf = int(line[0:6])
except ValueError: usaf = None
try: wban = int(line[7:12])
except ValueError: wban = None
name = line[13:43]
country = line[43:45]
state = line[49:51]
try: lat = float(line[58:64]) / 1000.0
except ValueError: lat = None
try: lon = float(line[65:72]) / 1000.0
except ValueError: lon = None
try: el = float(line[73:79]) / 10.0
except ValueError: el = None
try:
begin_year = int(line[83:87])
begin_mo = int(line[87:89])
begin_day = int(line[89:91])
begin = datetime.date(begin_year, begin_mo, begin_day)
except ValueError:
begin = None
try:
end_year = int(line[92:96])
end_mo = int(line[96:98])
end_day = int(line[98:100])
end = datetime.date(end_year, end_mo, end_day)
except ValueError:
end = None
sh = StationHistory(
usaf = usaf,
wban = wban,
name = name,
country = country,
state = state,
lat = lat,
lon = lon,
el = el,
begin = begin,
end = end,
)
return sh
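# Illustrative note (added, not part of the original script): parse_history_line
# slices fixed-width columns of NOAA's ish-history.txt, e.g. the USAF id from
# characters 0:6 and the WBAN id from 7:12, so a minimal use is
#   sh = parse_history_line(line)   # `line` taken from the downloaded file
#   print sh.usaf, sh.wban, sh.begin, sh.end
# and fields that fail to parse are stored as None.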
def get_station_histories():
"""Return a list of StationHistory objects."""
# TODO: Save the history in a sqlite db or something.
ish = requests.get('/'.join((ROOT, ISH_HISTORY)))
got_header = False
sh_list = []
for line in ish.text.split('\n')[22:]:
try:
sh_list.append(parse_history_line(line))
except Exception:
logging.error("Error processing line '%s'" % line, exc_info=True)
return sh_list
def parse_gsod_line(line):
"""Given a line of text from a GSOD file, return a tuple of (date, t_mean,
t_max, t_min)"""
t_mean = float(line[25:30])
t_mean = t_mean if t_mean < 9000 else float('nan')
t_max = float(line[102:108])
t_max = t_max if t_max < 9000 else float('nan')
t_min = float(line[110:116])
t_min = t_min if t_min < 9000 else float('nan')
year = int(line[14:18])
mo = int(line[18:20])
day = int(line[20:22])
date = datetime.date(year, mo, day)
return date, t_mean, t_max, t_min
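# Note (added, hedged): GSOD files mark missing temperatures with the sentinel
# 9999.9, which is why readings >= 9000 are replaced by NaN above; the caller
# (get_wban) then derives the day-of-year via date.timetuple(), whose index 7
# is tm_yday.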
def get_wban(wban, station_histories):
"""Fetch historical data for wban."""
# Could build an index but since we're only looking up one thing a linear
# search is probably fastest.
t_mean_list = []
t_max_list = []
t_min_list = []
idx_list = []
wban_sh = None
for sh in station_histories:
if wban == sh.wban:
wban_sh = sh
break
if wban_sh is None:
raise Exception("Couldn't find wban %s in station histories." % wban)
for year in xrange(wban_sh.begin.year, wban_sh.end.year + 1):
filename = '%06d-%05d-%04d.op.gz' % (wban_sh.usaf, wban_sh.wban, year)
url = '/'.join((ROOT, '%04d' % year, filename))
r = requests.get(url)
content = r.content
content_f = StringIO.StringIO(content)
content = gzip.GzipFile(fileobj=content_f).read()
for line in content.split('\n')[1:]:
try:
date, t_mean, t_max, t_min = parse_gsod_line(line)
doy = date.timetuple()[7]
t_mean_list.append(t_mean)
t_max_list.append(t_max)
t_min_list.append(t_min)
idx_list.append((year, doy))
except Exception:
if len(line) > 0:
logging.warn("Failed to parse line '%s'" % line, exc_info=True)
data = pandas.DataFrame(
{
't_mean': t_mean_list,
't_max': t_max_list,
't_min': t_min_list,
},
index = pandas.MultiIndex.from_tuples(idx_list, names=['year', 'doy'])
)
return data
def process_data(data):
"""Analyze historical data for wban."""
print data
print
print "All time record high temp: %s" % data.t_max.max()
print "All time record high mean temp: %s" % data.t_mean.max()
print "All time record low temp: %s" % data.t_min.min()
print "All time record low mean temp: %s" % data.t_mean.min()
print
cleaned = data.dropna()
annual = cleaned.groupby(level='year').mean()
plt.show(annual.plot())
annual = cleaned.groupby(level='year').max()
plt.show(annual.plot())
annual = cleaned.groupby(level='year').min()
plt.show(annual.plot())
def _setup_cache():
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
requests_cache.configure(os.path.join(CACHE_DIR, 'cache'))
def main(argv=None):
if argv is None:
argv = sys.argv
logging.basicConfig(level=logging.INFO)
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument('--wban')
ap.add_argument('-v', '--verbose', action='store_true', default=False)
args = ap.parse_args(argv[1:])
if args.verbose:
logging.getLogger('').setLevel(logging.DEBUG)
try:
_setup_cache()
station_histories = get_station_histories()
if args.wban is not None:
data = get_wban(int(args.wban), station_histories)
process_data(data)
except:
logging.critical("Exiting due to error", exc_info=True)
return -1
logging.debug("Exiting normally.")
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
kolas001/pyoptools | pyoptools/raytrace/_comp_lib/ccd.py | 9 | 8557 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
# Copyright (c) 2007, Ricardo Amézquita Orozco
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license.
#
#
# Author: Ricardo Amézquita Orozco
# Description: CCD definition module
# Symbols Defined: CCD
#------------------------------------------------------------------------------
#
'''
Definition of a CCD like object and helper functions
'''
#from enthought.traits.api import Float, Instance, HasTraits, Tuple, Int, Bool, Property
from scipy.misc import toimage
from scipy.interpolate import interp2d,bisplrep,bisplev
from numpy import arange, ma, meshgrid, linspace
from pyoptools.raytrace.component import Component
from pyoptools.raytrace.surface import ArrayDetector,Plane
from pyoptools.misc.pmisc import wavelength2RGB
from pyoptools.misc.lsq import polyfit2d
from pyoptools.raytrace.shape import Shape
from pyoptools.raytrace.shape import Rectangular
#from gui.plotutils import plot, figure, cm, legend
class CCD(Component):
'''**Class to define a CCD like detector**
*Attributes:*
*size*
        Tuple with the physical size of the CCD chip
*transparent*
Boolean to set the detector transparent characteristic. Not implemented
Using the same CCD, images of different resolutions can be simulated. See
the im_show and spot_diagram methods
'''
# Geometrical size of the CCD chip
#size = Tuple(Float(5),Float(5))
# Setting this attribute to *False*, make the CCD detector opaque
#transparent=Bool(True)
# Private attributes
# detector surface
#__d_surf = Instance(ArrayDetector)
#__d_surf = Instance(Plane)
def _get_hitlist(self):
return tuple(self.__d_surf.hit_list)
hit_list=property(_get_hitlist)
def __init__(self, size=(10,10), transparent=True,*args,**kwargs):
Component.__init__(self, *args, **kwargs)
self.__d_surf= Plane(shape=Rectangular(size=size))#ArrayDetector (size=self.size, transparent=self.transparent)
self.size=size
self.surflist["S1"]=(self.__d_surf,(0,0,0),(0,0,0))
self.material=1.
#~ def __reduce__(self):
#~ args=() #self.intensity,self.wavelength,self.n ,self.label,self.parent,self.pop,self.orig_surf)
#~ return(type(self),args,self.__getstate__())
#~
#~
#~ #TODO: Check if there is a better way to do this, because we are
#~ #rewriting the constructor values here
#~
#~ def __getstate__(self):
#~ return self.__d_surf,self.size,self.surflist,self.material
#~
#~ def __setstate__(self,state):
#~ self.__d_surf,self.size,self.surflist,self.material=state
def get_image(self,size=(256,256)):
"""
Returns the ccd hit_list as a grayscale PIL image
*Attributes:*
*size*
Tuple (dx,dy) containing the image size in pixels. Use this
attribute to set the simulated resolution.
"""
data= self.__d_surf.get_histogram(size)
return(toimage(data, high=255, low=0,cmin=0,cmax=data.max()))
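    # Illustrative sketch (added, not part of the original module): assuming
    # `ccd` is a CCD instance that has already accumulated ray hits,
    #   >>> im = ccd.get_image(size=(512, 512))
    #   >>> im.save('spots.png')
    # writes the simulated detector image to disk.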
def get_color_image(self, size=(256,256)):
"""
        Returns the CCD hit_list as a color image, using the rays' wavelength.
*Attributes*
*size*
Tuple (dx,dy) containing the image size in pixels. Use this
attribute to set the simulated resolution.
"""
data= self.__d_surf.get_color_histogram(size)
return(toimage(data, high=255, low=0))
#~ def im_show(self,fig=None, size=(256,256),cmap=cm.gray,title='Image',color=False):
#~ """Shows a simulated image
#~
#~ *Attributes:*
#~
#~ *size*
#~ Tuple (dx,dy) containing the image size in pixels. Use this
#~ attribute to set the simulated resolution.
#~ *cmap*
#~ Color map to use in the image simulation. See the matplotlib.cm
#~ module for information about colormaps.
#~ *fig*
#~ Pylab figure where the plot will be made. If set to None
#~ a new figure will be created.
#~ """
#~ if fig == None:
#~ fig=figure()
#~
#~ self.__d_surf.im_show(size,cmap,title,color)
#~
#~
#~ def spot_diagram(self,fig=None, style="o", label=None):
#~ '''Plot a spot diagram in a pylab figure
#~
#~ Method that plots a spot diagram of the rays hitting the CCD.
#~
#~ *Attributes:*
#~
#~ *fig*
#~ Pylab figure where the plot will be made. If set to None
#~ a new figure will be created.
#~
#~ *style*
#~ Symbol to be used to represent the spot. See the pylab plot
#~ documentation for more information.
#~
#~ *label*
#~ String containig the label to show in the figure for this spot diagram.
#~ Can be used to identify diferent spot diagrams on the same figure.
#~ '''
#~
#~ if fig == None:
#~ fig=figure()
#~ X=[]
#~ Y=[]
#~ COL=[]
#~ if len(self.__d_surf._hit_list) >0:
#~ for i in self.__d_surf._hit_list:
#~ p=i[0]
#~ # Hitlist[1] points to the incident ray
#~ col=wavelength2RGB(i[1].wavelength)
#~ X.append(p[0])
#~ Y.append(p[1])
#~ COL.append(col)
#~ if label== None:
#~ plot(X, Y, style, figure=fig)
#~ else:
#~ plot(X, Y, style,label=label,figure=fig)
#~ legend()
#~ return fig
def get_optical_path_map(self,size=(20, 20), mask=None):
"""Return the optical path of the rays hitting the detector.
        This method uses the optical path of the rays hitting the surface to
        create an optical path map. The returned value is an interpolation of
        the values obtained by the rays.
        Warning:
            If the rays hitting the surface are produced by more than one
            optical source, the returned map might not be valid.
        *Attributes*
        *size*
            Tuple (nx,ny) containing the number of samples of the returned map.
            The map size will be the same as the CCD
        *mask*
            Shape instance containing the mask of the aperture. If not given,
            the mask will be automatically calculated.
        *Return value*
            A masked array as defined in the numpy.ma module, containing the optical paths
"""
X,Y,Z=self.get_optical_path_data()
rv=bisplrep(X,Y,Z)
nx, ny=size
xs, ys=self.size
xi=-xs/2.
xf=-xi
yi=-ys/2.
yf=-yi
xd=linspace(xi, xf,nx)
yd=linspace(yi, yf,ny)
data=bisplev(xd,yd,rv)
        if mask is not None:
assert(isinstance(mask, Shape))
X, Y=meshgrid(xd, yd)
m= ~mask.hit((X, Y, 0))
retval= ma.array(data, mask=m)
else:
retval=data
return retval
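    # Illustrative sketch (added, not part of the original module): assuming
    # `ccd` has been hit by rays and matplotlib.pyplot is imported as plt,
    #   >>> opm = ccd.get_optical_path_map(size=(64, 64))
    #   >>> plt.imshow(opm); plt.colorbar(); plt.show()
    # displays the interpolated optical-path (wavefront) map over the detector.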
def get_optical_path_map_lsq(self,order=10):
"""Return the optical path of the rays hitting the detector.
*Atributes*
"""
X,Y,Z=self.get_optical_path_data()
e,p=polyfit2d(X, Y, Z, order=order)
return e,p
def get_optical_path_data(self):
"""Return the optical path of the rays hitting the detector.
This method returns a tuple X,Y,D, containing the X,Y hit points, and
        D containing the optical path data
Warning:
If the rays hitting the surface are produced by more than one
optical source, the information may not be valid.
"""
X=[]
Y=[]
Z=[]
for ip,r in self.hit_list:
x,y,z= ip
d= r.optical_path()
X.append(x)
Y.append(y)
Z.append(d)
return X,Y,Z
| bsd-3-clause |
xubenben/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average precision-recall curve and its average precision
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
idlead/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
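# Note (added, hedged): with metric="precomputed" the matrix passed to predict()
# is interpreted as distances from each test sample to the fitted centroids,
# which is why S above is built with pairwise_distances(T, clf.centroids_).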
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
fbagirov/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
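# Note (added, hedged): after the transform, X holds the projection of the iris
# samples onto the first three principal components; pca.explained_variance_ratio_
# reports the fraction of variance captured by each of them.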
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
glouppe/scikit-learn | examples/plot_digits_pipe.py | 70 | 1813 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
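# Illustrative note (added): the same double-underscore convention works outside
# the grid search as well, e.g. pipe.set_params(pca__n_components=40,
# logistic__C=1.0) sets nested parameters directly on the pipeline.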
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
jiaphuan/models | research/cognitive_mapping_and_planning/scripts/script_env_vis.py | 5 | 6042 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple python function to walk in the enviornments that we have created.
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_env_vis.py \
--dataset_name sbpd --building_name area3
"""
import sys
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
from PIL import ImageTk, Image
import Tkinter as tk
import logging
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
import datasets.nav_env_config as nec
import datasets.nav_env as nav_env
import cv2
from datasets import factory
import render.swiftshader_renderer as renderer
SwiftshaderRenderer = renderer.SwiftshaderRenderer
VisualNavigationEnv = nav_env.VisualNavigationEnv
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset_name', 'sbpd', 'Name of the dataset.')
flags.DEFINE_float('fov', 60., 'Field of view')
flags.DEFINE_integer('image_size', 512, 'Size of the image.')
flags.DEFINE_string('building_name', '', 'Name of the building.')
def get_args():
navtask = nec.nav_env_base_config()
navtask.task_params.type = 'rng_rejection_sampling_many'
navtask.task_params.rejection_sampling_M = 2000
navtask.task_params.min_dist = 10
sz = FLAGS.image_size
navtask.camera_param.fov = FLAGS.fov
navtask.camera_param.height = sz
navtask.camera_param.width = sz
navtask.task_params.img_height = sz
navtask.task_params.img_width = sz
# navtask.task_params.semantic_task.class_map_names = ['chair', 'door', 'table']
# navtask.task_params.type = 'to_nearest_obj_acc'
logging.info('navtask: %s', navtask)
return navtask
def load_building(dataset_name, building_name):
dataset = factory.get_dataset(dataset_name)
navtask = get_args()
cp = navtask.camera_param
rgb_shader, d_shader = renderer.get_shaders(cp.modalities)
r_obj = SwiftshaderRenderer()
r_obj.init_display(width=cp.width, height=cp.height,
fov=cp.fov, z_near=cp.z_near, z_far=cp.z_far,
rgb_shader=rgb_shader, d_shader=d_shader)
r_obj.clear_scene()
b = VisualNavigationEnv(robot=navtask.robot, env=navtask.env,
task_params=navtask.task_params,
building_name=building_name, flip=False,
logdir=None, building_loader=dataset,
r_obj=r_obj)
b.load_building_into_scene()
b.set_building_visibility(False)
return b
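# Illustrative usage sketch (added, not part of the original script): assuming
# the sbpd dataset for area3 is available locally,
#   b = load_building('sbpd', 'area3')
#   walk_through(b)
# which is what main() below does with the command-line flags.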
def walk_through(b):
# init agent at a random location in the environment.
init_env_state = b.reset([np.random.RandomState(0), np.random.RandomState(0)])
global current_node
rng = np.random.RandomState(0)
current_node = rng.choice(b.task.nodes.shape[0])
root = tk.Tk()
image = b.render_nodes(b.task.nodes[[current_node],:])[0]
print(image.shape)
image = image.astype(np.uint8)
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
panel = tk.Label(root, image=im)
map_size = b.traversible.shape
sc = np.max(map_size)/256.
loc = np.array([[map_size[1]/2., map_size[0]/2.]])
x_axis = np.zeros_like(loc); x_axis[:,1] = sc
y_axis = np.zeros_like(loc); y_axis[:,0] = -sc
cum_fs, cum_valid = nav_env.get_map_to_predict(loc, x_axis, y_axis,
map=b.traversible*1.,
map_size=256)
cum_fs = cum_fs[0]
cum_fs = cv2.applyColorMap((cum_fs*255).astype(np.uint8), cv2.COLORMAP_JET)
im = Image.fromarray(cum_fs)
im = ImageTk.PhotoImage(im)
panel_overhead = tk.Label(root, image=im)
def refresh():
global current_node
image = b.render_nodes(b.task.nodes[[current_node],:])[0]
image = image.astype(np.uint8)
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
panel.configure(image=im)
panel.image = im
def left_key(event):
global current_node
current_node = b.take_action([current_node], [2], 1)[0][0]
refresh()
def up_key(event):
global current_node
current_node = b.take_action([current_node], [3], 1)[0][0]
refresh()
def right_key(event):
global current_node
current_node = b.take_action([current_node], [1], 1)[0][0]
refresh()
def quit(event):
root.destroy()
panel_overhead.grid(row=4, column=5, rowspan=1, columnspan=1,
sticky=tk.W+tk.E+tk.N+tk.S)
panel.bind('<Left>', left_key)
panel.bind('<Up>', up_key)
panel.bind('<Right>', right_key)
panel.bind('q', quit)
panel.focus_set()
panel.grid(row=0, column=0, rowspan=5, columnspan=5,
sticky=tk.W+tk.E+tk.N+tk.S)
root.mainloop()
def simple_window():
root = tk.Tk()
image = np.zeros((128, 128, 3), dtype=np.uint8)
image[32:96, 32:96, 0] = 255
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
image = np.zeros((128, 128, 3), dtype=np.uint8)
image[32:96, 32:96, 1] = 255
im2 = Image.fromarray(image)
im2 = ImageTk.PhotoImage(im2)
panel = tk.Label(root, image=im)
def left_key(event):
panel.configure(image=im2)
panel.image = im2
def quit(event):
sys.exit()
panel.bind('<Left>', left_key)
panel.bind('<Up>', left_key)
panel.bind('<Down>', left_key)
panel.bind('q', quit)
panel.focus_set()
panel.pack(side = "bottom", fill = "both", expand = "yes")
root.mainloop()
def main(_):
b = load_building(FLAGS.dataset_name, FLAGS.building_name)
walk_through(b)
if __name__ == '__main__':
app.run()
| apache-2.0 |
Vrekrer/magdynlab | utils/MxH.py | 2 | 3604 | # coding=utf-8
# Author: Diego Gonzalez Chavez
# email : [email protected] / [email protected]
import numpy
import matplotlib.pyplot as plt
class MxH(object):
def __init__(self, file_name, Hs = 100, smooth = None,
incl = False, Hcs = False, Invert = False,
skiprows = 'Auto', usecols = 'Auto', delimiter = ' '):
self.file_name = file_name
self.Analizar(Hs, smooth, incl, Hcs, Invert, skiprows, usecols, delimiter)
def Analizar(self, Hs = 100, smooth = None,
incl = False, Hcs = False, Invert = False,
skiprows = 'Auto', usecols = 'Auto', delimiter = ' '):
        # Normalization and calculation of the coercive and exchange-bias fields
if skiprows == 'Auto':
skiprows = 0
if usecols == 'Auto':
usecols = (0,1,2)
try:
(H,M,E) = numpy.loadtxt(self.file_name, unpack = True,
usecols = usecols, skiprows = skiprows,
delimiter = delimiter)
except:
(H,M) = numpy.loadtxt(self.file_name, unpack = True,
usecols = usecols[0:2], skiprows = skiprows,
delimiter = delimiter)
E = numpy.ones_like(H)
if Invert:
M *= -1
H *= -1
        if smooth is not None:
M = smoothX(M, smooth)
self.M_Raw = M
self.H = H
self.E = E
if numpy.isscalar(Hs):
Hs = [-Hs, Hs]
Hn = H[H<Hs[0]]
Mn = M[H<Hs[0]]
En = E[H<Hs[0]] + 1E-25
Wn = (1/En)/(1/En).sum()
Hp = H[H>Hs[1]]
Mp = M[H>Hs[1]]
Ep = E[H>Hs[1]] + 1E-25
Wp = (1/Ep)/(1/Ep).sum()
if incl:
            # Remove the linear tilt (slope) of the curve
pn = numpy.polyfit(Hn,Mn,1,w=Wn)
pp = numpy.polyfit(Hp,Mp,1,w=Wp)
M = M - H*(pn[0]*len(Hn)+pp[0]*len(Hp))/(len(Hn) + len(Hp))
Mn = M[H<Hs[0]]
Mp = M[H>Hs[1]]
Mm = (Mn*Wn).sum()
MM = (Mp*Wp).sum()
self.M = ( M - (MM+Mm)/2 )
self.Mn = self.M / ((MM-Mm)/2)
self.Ms = (MM-Mm)/2.0
if Hcs:
smag = numpy.sign(self.Mn)
smag[0] = 0
tst1 = numpy.abs(smag - numpy.roll(smag, 1)) == 2
tst2 = numpy.abs(smag - numpy.roll(smag, -1)) == 2
tst = tst1 + tst2
Ht = H[tst]
Mt = self.Mn[tst]
H1 = Ht[0] - ((Ht[0]-Ht[1])/(Mt[0]-Mt[1]))*Mt[0]
H2 = Ht[-1] - ((Ht[-1]-Ht[-2])/(Mt[-1]-Mt[-2]))*Mt[-1]
self.Hex = (H1 + H2)/2
self.Hc = numpy.abs(H1 - H2)/2
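            # Note (added, hedged): H1 and H2 are the fields where the normalized
            # loop crosses M = 0 (linear interpolation between neighbouring
            # points), so Hex is the loop shift (exchange-bias field) and Hc is
            # the half-width of the loop (coercive field).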
def plotM(self, opt = 'bo-', figN = 1):
plt.figure(figN)
plt.plot(self.H, self.M, opt)
plt.grid(True)
plt.xlabel('Field (Oe)')
        plt.ylabel('Magnetization (e.m.u.)')
def plotMn(self, opt = 'bo-', figN = 2):
plt.figure(figN)
plt.plot(self.H, self.Mn, opt)
plt.grid(True)
plt.xlabel('H(Oe)')
plt.ylabel('M/Ms')
def saveMn(self, file_name):
numpy.savetxt(file_name, numpy.array([self.H, self.Mn]).T, fmt = '%.5e')
def saveM(self, file_name):
numpy.savetxt(file_name, numpy.array([self.H, self.M]).T, fmt = '%.5e')
def smoothX(x, wl = 5):
w= numpy.hanning(2*wl+1)
w = w/w.sum()
s = numpy.r_[numpy.ones(wl)*x[0], x, numpy.ones(wl)*x[-1]]
y=numpy.convolve(w/w.sum(),s,mode='valid')
return y
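# Illustrative note (added): smoothX pads the signal at both ends and convolves
# it with a normalized Hanning window of length 2*wl + 1, e.g.
#   y = smoothX(numpy.random.rand(100), wl=5)
# returns an array of the same length with the high-frequency noise attenuated.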
| mit |
madjelan/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
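# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the scikit-learn API):
# a minimal round trip through fit_transform / inverse_transform on a small
# random sparse matrix.  It also shows why a single fitted instance should be
# reused, per the "sign indeterminacy" note in the class docstring.  The data
# below is made up purely for demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # 100 samples x 20 features, sparse-ish counts
    X_demo = sp.csr_matrix(rng.poisson(0.3, size=(100, 20)).astype(np.float64))
    svd_demo = TruncatedSVD(n_components=5, random_state=0)
    X_reduced = svd_demo.fit_transform(X_demo)      # dense, shape (100, 5)
    X_back = svd_demo.inverse_transform(X_reduced)  # dense, shape (100, 20)
    print("explained variance ratio sum: %.3f"
          % svd_demo.explained_variance_ratio_.sum())
    print("reconstruction error (Frobenius): %.3f"
          % np.linalg.norm(X_back - X_demo.toarray()))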
| bsd-3-clause |
jjx02230808/project0223 | sklearn/feature_extraction/tests/test_text.py | 59 | 35604 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = '\u0627'  # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = ''  # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer's DF thresholding:
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset all candidate models converge to 100% accuracy,
    # so the grid search settles on the unigram representation as the best
    # estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset all candidate models converge to 100% accuracy,
    # so the grid search settles on the unigram representation as the best
    # estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
rajul/tvb-library | tvb/analyzers/node_coherence.py | 3 | 6727 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Compute cross coherence between all nodes in a time series.
.. moduleauthor:: Stuart A. Knock <[email protected]>
.. moduleauthor:: Marmaduke Woodman <[email protected]>
"""
import numpy
import matplotlib.mlab as mlab
from matplotlib.pylab import detrend_linear
#TODO: Currently built around the Simulator's 4D timeseries -- generalise...
import tvb.datatypes.time_series as time_series
import tvb.datatypes.spectral as spectral
import tvb.basic.traits.core as core
import tvb.basic.traits.types_basic as basic
import tvb.basic.traits.util as util
from tvb.basic.logger.builder import get_logger
LOG = get_logger(__name__)
#TODO: Make an appropriate spectral datatype for the output
#TODO: Should do this properly, ie not with mlab, returning both coherence and
# the complex coherence spectra, then supporting magnitude squared
# coherence, etc in a similar fashion to the FourierSpectrum datatype...
def hamming(M, sym=True):
"""
The M-point Hamming window.
From scipy.signal
"""
if M < 1:
return numpy.array([])
if M == 1:
return numpy.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = numpy.arange(0, M)
w = 0.54 - 0.46 * numpy.cos(2.0 * numpy.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
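# Editorial sketch (not part of TVB): for sym=True the window above should
# match numpy.hamming, which uses the same 0.54 - 0.46*cos(2*pi*n/(M-1))
# definition; this helper is only a quick sanity check.
def _check_hamming_against_numpy(M=16):
    return numpy.allclose(hamming(M, sym=True), numpy.hamming(M))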
def coherence_mlab(data, sample_rate, nfft=256):
_, nsvar, nnode, nmode = data.shape
# (frequency, nodes, nodes, state-variables, modes)
coh_shape = nfft/2 + 1, nnode, nnode, nsvar, nmode
LOG.info("coh shape will be: %s" % (coh_shape, ))
coh = numpy.zeros(coh_shape)
for mode in range(nmode):
for var in range(nsvar):
            # use a local slice; do not overwrite the 4D input array, which
            # would break subsequent (var, mode) iterations
            d = data[:, var, :, mode].copy()
            d -= d.mean(axis=0)[numpy.newaxis, :]
            for n1 in range(nnode):
                for n2 in range(nnode):
                    cxy, freq = mlab.cohere(
                        d[:, n1], d[:, n2], NFFT=nfft, Fs=sample_rate,
                        detrend=detrend_linear, window=mlab.window_none)
                    coh[:, n1, n2, var, mode] = cxy
return coh, freq
def coherence(data, sample_rate, nfft=256, imag=False):
"Vectorized coherence calculation by windowed FFT"
nt, ns, nn, nm = data.shape
nwin = nt / nfft
if nwin < 1:
raise ValueError(
"Not enough time points ({0}) to compute an FFT, given a "
"window size of nfft={1}.".format(nt, nfft))
# ignore leftover data; need shape (nn, ... , nwin, nfft)
wins = data[:nwin * nfft]\
.copy()\
.transpose((2, 1, 3, 0))\
.reshape((nn, ns, nm, nwin, nfft))
wins *= hamming(nfft)
F = numpy.fft.fft(wins)
fs = numpy.fft.fftfreq(nfft, 1e3 / sample_rate)
# broadcasts to [node_i, node_j, ..., window, time]
G = F[:, numpy.newaxis] * F.conj()
if imag:
G = G.imag
dG = numpy.array([G[i, i] for i in range(nn)])
C = (numpy.abs(G)**2 / (dG[:, numpy.newaxis] * dG)).mean(axis=-2)
mask = fs > 0.0
# C_ = numpy.abs(C.mean(axis=0).mean(axis=0))
return numpy.transpose(C[..., mask], (4, 0, 1, 2, 3)), fs[mask]
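# Editorial sketch (not part of TVB): minimal usage of the vectorized
# coherence() above.  It expects data shaped (time, state-variables, nodes,
# modes); here two noisy nodes share a common sinusoid, so their
# off-diagonal coherence should show a clear peak (the frequency axis
# follows the fftfreq call used above).  Values below are made up.
def _coherence_usage_sketch(nt=4096, sample_rate=256.0, nfft=256):
    rng = numpy.random.RandomState(0)
    t = numpy.arange(nt) / sample_rate
    shared = numpy.sin(2.0 * numpy.pi * 10.0 * t)
    data = numpy.zeros((nt, 1, 2, 1))
    data[:, 0, 0, 0] = shared + 0.5 * rng.randn(nt)
    data[:, 0, 1, 0] = shared + 0.5 * rng.randn(nt)
    coh, freq = coherence(data, sample_rate, nfft=nfft)
    # coh has shape (n_freq, nodes, nodes, state-variables, modes)
    return coh, freq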
class NodeCoherence(core.Type):
"Adapter for cross-coherence algorithm(s)"
time_series = time_series.TimeSeries(
label="Time Series",
required=True,
doc="""The timeseries to which the FFT is to be applied.""")
nfft = basic.Integer(
label="Data-points per block",
default=256,
doc="""Should be a power of 2...""")
def evaluate(self):
"Evaluate coherence on time series."
cls_attr_name = self.__class__.__name__+".time_series"
self.time_series.trait["data"].log_debug(owner=cls_attr_name)
srate = self.time_series.sample_rate
coh, freq = coherence(self.time_series.data, srate, nfft=self.nfft)
util.log_debug_array(LOG, coh, "coherence")
util.log_debug_array(LOG, freq, "freq")
spec = spectral.CoherenceSpectrum(
source=self.time_series,
nfft=self.nfft,
array_data=coh,
frequency=freq,
use_storage=False)
return spec
def result_shape(self, input_shape):
"""Returns the shape of the main result of NodeCoherence."""
freq_len = self.nfft/2 + 1
freq_shape = (freq_len,)
result_shape = (freq_len, input_shape[2], input_shape[2], input_shape[1], input_shape[3])
return [result_shape, freq_shape]
def result_size(self, input_shape):
"""
Returns the storage size in Bytes of the main result of NodeCoherence.
"""
# TODO This depends on input array dtype!
result_size = numpy.sum(map(numpy.prod, self.result_shape(input_shape))) * 8.0 #Bytes
return result_size
def extended_result_size(self, input_shape):
"""
        Returns the storage size in Bytes of the extended result of NodeCoherence.
        That is, it includes storage of the evaluated CoherenceSpectrum attributes.
"""
extend_size = self.result_size(input_shape) #Currently no derived attributes.
return extend_size
| gpl-2.0 |
kensugino/jGEM | jgem/evaluate.py | 1 | 33948 | """
.. module:: evaluate
:synopsis: evaluate performance by comparing to a reference annotation
.. moduleauthor:: Ken Sugino <[email protected]>
"""
# system imports
import gzip
import os
import subprocess
from collections import Counter
from operator import iadd
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
import json
# 3rd party libraries
import pandas as PD
import numpy as N
import matplotlib.pyplot as P
# library imports
from jgem import utils as UT
from jgem import gtfgffbed as GGB
from jgem import bedtools as BT
from jgem import bigwig as BW
from jgem import filenames as FN
from jgem import calccov as CC
class EvalNames(FN.FileNamesBase):
"""Filename manager for evaluation process.
Attributes:
sjexbase: path prefix to junction, exon files (\*.sj.txt.gz and \*.ex.txt.gz)
code: assembly identifier
outdir: output directory
All outputs and temporary files are prefixed by **outdir/code**
"""
def __init__(self, sjexbase, code, outdir):
self.sjexbase = sjexbase
self.code = code
self.outdir = outdir
for x in ['sj','ex','ci']:
setattr(self, x+'path', '{0}.{1}.txt.gz'.format(sjexbase,x))
prefix = os.path.join(outdir, code)
super(EvalNames, self).__init__(prefix)
def fname2(self, suffix, code2, category='temp'):
"""Generate filenames furthre prefixed by code2.
Args:
suffix: (str)
code2: (str) identifier (comparison target)
category: (str)
Returns:
(outdir)/(code).(code2).(suffix)
"""
suf = '{0}.{1}'.format(code2, suffix)
return self.fname(suf, category)
def modelpath(self, which, code2=None):
"""Returns path to junction(sj)/exon(ex)/choppedinterval(ci) file.
Args:
which: one of 'sj','ex','ci'
"""
path = '{0}.{1}.txt.gz'.format(self.sjexbase, which)
if code2 is None:
return path
path2 = self.fname2('{0}.txt.gz'.format(which),code2, category='read')
if os.path.exists(path2):
return path2
return path
def model(self, which, code2=None):
"""Returns model dataframe (junction/exon/chopped intervals).
Args:
which: one of 'sj','ex', 'ci'
"""
if hasattr(self, which): # cached
return getattr(self, which)
path = self.modelpath(which, code2)
if os.path.exists(path): # file exists
if which == 'ci':
df = GGB.read_bed(path)
else:
df = UT.read_pandas(path)
setattr(self, which, df)
return df
        # file does not exist; if it is ci, make it from ex
if which=='ci':
expath = self.modelpath('ex', code2)
if os.path.exists(expath):
self.ci = UT.chopintervals(self.model('ex'), path)
else:
raise RuntimeError('file {0} does not exist'.format(expath))
else:
raise RuntimeError('file {0} does not exist'.format(path))
def savemodel(self, which, code2=None, category='temp'):
"""Save model. If code2 is None, overwrite original, if code2 is provided,
writes to outdir/(code).(code2).(which).txt.gz.
Args:
which: 'sj','ex','ci'
code2: 2nd identifier
category: filename category (default 'temp')
Returns:
file path or None (if model is not loaded)
"""
if hasattr(self, which):
if code2 is None:
path = self.modelpath(which, None)
else:
path = self.fname2('{0}.txt.gz'.format(which),code2, category=category)
return UT.write_pandas(getattr(self, which), path, 'h')
return None
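# Editorial sketch (not part of jGEM): how EvalNames composes paths from the
# sjex prefix, the assembly code and the output directory.  The paths below
# are hypothetical and only illustrate the naming scheme documented above.
def _evalnames_usage_sketch():
    en = EvalNames('/data/assemblies/sample1', 'ref', '/data/eval_out')
    sjpath = en.modelpath('sj')             # /data/assemblies/sample1.sj.txt.gz
    exname = en.fname2('ex.txt.gz', 'tgt')  # /data/eval_out/ref.tgt.ex.txt.gz
    return sjpath, exname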
WSDEFAULT = ['i',('5','5b'),('3','3b'),('s','sb'),'j']
WSDEFAULT1 = ['i','5','3','s','j']
WSDEFAULT2 = ['i','5','5b','3','3b','s','sb','j']
WSDEFAULT3 = ['i','5b','3b','sb','j']
class EvalMatch(object):
"""Compare two models against a genome coverage (bigwig)
and junction counts (sjfile).
Usage:
>>> en1 = EvalNames(sjexpre_to_ref, 'ref', outdir)
>>> en2 = EvalNames(sjexpre_to_target, 'tgt', outdir)
>>> em = EvalMatch(en1,en2,bwfile,sjfile,datacode)
>>> figs = em.calculate(outdir)
"""
abbr = {'i':'internal exons',
'5':"5' exons",
'5b':"5' exons (b)",
'3':"3' exons",
'3b':"3' exons (b)",
's':'single exons',
'sb':'single exons (b)',
'j':'junctions'}
def __init__(self, en1, en2, bigwig, sjfile, datacode, binsize=500,
exclude_se_from_completeness=True):
"""
Args:
en1: EvalNames object, reference
en2: EvalNames object, sensitivity of this model against en1 is calculated
bigwig: path to normalized bigwig coverage file
sjfile: path to normalized junction counts file
datacode: code indicating data (bigwig & sjfile)
            binsize (int): for sensitivity plot (default 500)
"""
self.en1 = en1
self.en2 = en2
self.bigwig = bigwig
self.sjfile = sjfile
self.datacode = datacode
self.closest = {} # holds closest exon matched (for 5',3',single exons)
self.stats = {'code1':en1.code, 'code2':en2.code, 'datacode':datacode,
'binsize':binsize,'bigwig':bigwig, 'sjfile':sjfile}
self.ratios = {} # holds dataframes of cov(x) and ratio(y)
self.binsize = binsize
self.exclude_se_from_completeness = exclude_se_from_completeness
def calculate(self, np=3, saveintermediates=False):
"""Calculate necessary data.
1. for en1 and en2 calculate ecov,gcov,jcnt (prep_sjex)
2. calculate match between en1 and en2 (find_match)
3. calculate length ratio, detected numbers, sensitivity, etc. (calc_stats)
"""
# calc exon, junction, gene coverage
self.prep_sjex(self.en1, np, True, True)
self.prep_sjex(self.en2, np, True, False)
# register for deleting later, keep ref calc
dcode = self.datacode
# self.en2.fname2('covci.txt.gz',dcode)
# self.en2.fname2('ecov.txt.gz',dcode)
# self.en2.fname2('gcov.txt.gz',dcode)
self.find_match()
self.calc_stats()
self.calc_completeness()
if not saveintermediates:
self.en1.delete(['temp'],['output','read'])
self.en2.delete(['temp'],['output','read'])
self.save()
def save(self):
# [i,5,5b,3,3b,s,sb,j,glc,ecc,jcc]
# light weight stats also usable from others ==> dict
# auc, detected1, ..., sigmoid,...,maxx,avgx,avgy,...
# ==> pickle or json
decode = '{0}.{1}'.format(self.en1.code, self.datacode)
fname1 = self.en2.fname2('stats.json',decode,category='output')
UT.makedirs(os.path.dirname(fname1))
with open(fname1,'w') as fp:
json.dump(self.stats, fp)
# [i,5,5b,3,3b,s,sb,j] cov(x),ratio(y) => in a dataframe
# [glc,ecc,jcc] gcov(x), ratio(y) => in a dataframe
# ==> put all in one four column dataframe (kind, id, x, y)
fname2 = self.en2.fname2('ratios.txt.gz',decode,category='output')
for k, v in self.ratios.items():
v['kind'] = k
df = PD.concat(self.ratios.values(), ignore_index=True)
UT.write_pandas(df, fname2, 'h')
# DP
dp = self.get_detection_percentages()
fname3 = self.en2.fname2('dp.txt.gz', decode,category='output')
UT.write_pandas(dp, fname3, 'ih')
def load(self):
decode = '{0}.{1}'.format(self.en1.code, self.datacode)
fname1 = self.en2.fname2('stats.json',decode,category='output')
with open(fname1,'r') as fp:
self.stats = json.load(fp)
fname2 = self.en2.fname2('ratios.txt.gz',decode,category='output')
df = UT.read_pandas(fname2)
for k in df['kind'].unique():
self.ratios[k] = df[df['kind']==k][['x','y']]
def colname(self, x):
return '{0}_{1}'.format(x, self.datacode)
def colname2(self, x, code):
return '{0}_{1}_{2}'.format(x, self.datacode, code)
def prep_sjex(self, en, np=1, savesjex=True, calccovs=True):
""" Assign ecov, gcov, jcnt """
dcode = self.datacode
sj = en.model('sj',dcode)
ex = en.model('ex',dcode)
savesj = False
saveex = False
# check support
if len(sj)>0:
dids = set(ex['d_id'].values)
aids = set(ex['a_id'].values)
idx = sj['a_id'].isin(aids) & sj['d_id'].isin(dids)
sj = sj[idx].copy()
en.sj = sj
if '_id' not in ex.columns: # edge case (len(sj)==0)
ex['_id'] = N.arange(len(ex))
if '_gidx' not in ex.columns: # edge case (len(sj)==0)
ex['_gidx'] = N.arange(len(ex))
# length
if 'len' not in sj.columns:
sj['len'] = sj['ed'] - sj['st']
savesj = True
if 'len' not in ex.columns:
ex['len'] = ex['ed'] - ex['st']
saveex = True
# ecov
if calccovs:
print('calccov for {0}'.format(en.code))
ecovname = self.colname('ecov')
if ecovname not in ex.columns:
ecov = CC.calc_ecov(
expath=en.modelpath('ex'),
cipath=en.modelpath('ci'),
bwpath=self.bigwig,
dstprefix=en.fname2('',self.datacode), # cov is data dependent
override=False, # override previous?
np=np)
ex[ecovname] = ecov.set_index('eid').ix[ex['_id'].values]['ecov'].values
saveex = True
# gcov, glen
gcovname = self.colname('gcov')
if gcovname not in ex.columns:
gcov = CC.calc_gcov(
expath=en.modelpath('ex'),
cipath=en.modelpath('ci'),
bwpath=self.bigwig,
dstprefix=en.fname2('',self.datacode),
override=False, # reuse covci from ecov calc
np=np)
tmp = gcov.set_index('_gidx').ix[ex['_gidx'].values]
ex[gcovname] = tmp['gcov'].values
if 'glen' in tmp:
ex['glen'] = tmp['glen'].values # glen is only dependent on model not data
saveex = True
else:
ecovname = self.colname('ecov')
if ecovname not in ex.columns:
ex[ecovname] = 0
gcovname = self.colname('gcov')
if gcovname not in ex.columns:
ex[gcovname] = 0
# sjcnt
ucntname = self.colname('ucnt')
mcntname = self.colname('mcnt')
jcntname = self.colname('jcnt')
sjfile = self.sjfile
if ucntname not in sj.columns:
if sjfile.endswith('.bed') or sjfile.endswith('.bed.gz'): # no header
dsj = UT.read_pandas(sjfile, names=['chr','st','ed','name','ucnt','strand','mcnt'])
else: # assume txt file with header
dsj = UT.read_pandas(sjfile)
# locus based matching
dsj['locus'] = UT.calc_locus_strand(dsj)
sj['locus'] = UT.calc_locus_strand(sj)
l2u = UT.df2dict(dsj, 'locus', 'ucnt')
l2m = UT.df2dict(dsj, 'locus', 'mcnt')
sj[ucntname] = [l2u.get(x,0) for x in sj['locus']]
sj[mcntname] = [l2m.get(x,0) for x in sj['locus']]
sj[jcntname] = [x or y for x,y in sj[[ucntname,mcntname]].values]
savesj = True
if saveex and savesjex:
en.savemodel('ex',dcode, category='output')
if savesj and savesjex:
en.savemodel('sj',dcode, category='output')
def find_match(self):
en1 = self.en1
en2 = self.en2
# write internal,3,5,se exons separately for finding match
a = en1.fname2('emtmp.ex.bed.gz', en2.code) # need to be unique to avoid parallel conflict (en1 ref shared)
b = en2.fname('emtmp.ex.bed.gz')
c = en1.fname2('emtmp.ex.ovl.txt.gz', en2.code)
self.e1 = e1 = en1.model('ex')
self.e2 = e2 = en2.model('ex')
ecovname = self.colname('ecov')
cols = ['chr','st','ed','cat','_id',ecovname,'_gidx','len','strand']
a = UT.write_pandas(e1[cols],a,'')
b = UT.write_pandas(e2[cols],b,'')
c = BT.bedtoolintersect(a,b,c,wao=True)
ocols = cols + ['b_'+x for x in cols] + ['ovl']
self.ov = ov = UT.read_pandas(c, names=ocols) # overlaps of exons
idxchr = ov['chr']==ov['b_chr'] # str vs. str
idxstrand = ov['strand']==ov['b_strand'] # str vs. str
idxp = (ov['strand']=='+')&idxstrand
idxn = (ov['strand']=='-')&idxstrand
idxst = ov['st']==ov['b_st'] # b_st column mixed? type?
idxed = ov['ed']==ov['b_ed'] # b_ed column mixed? type?
idxcat = ov['cat']==ov['b_cat']
idxcov = ov[ecovname]>0 # exons with reads
LOG.debug('='*10 + 'calculating match between {0} and {1}'.format(en1.code, en2.code))
LOG.debug('len(ov):{0}'.format(len(ov)))
for k in ['idxchr','idxstrand','idxp','idxn','idxst','idxed','idxcat','idxcov']:
v = locals()[k]
LOG.debug('#{0}:{1}'.format(k, N.sum(v)))
# internal exon cat='i' and chr,st,ed,strand match
self.ei = ei = ov[idxchr&idxstrand&idxst&idxed&idxcat&(ov['cat']=='i')].copy()
# 5' cat='5' and chr,donor (+,ed)|(-,st) match, find closest
self.e5 = e5 = ov[idxchr&((idxp&idxed)|(idxn&idxst))&idxcat&(ov['cat']=='5')].copy()
# 3' cat='3' and chr,acceptor (+,st)|(-,ed) match
self.e3 = e3 = ov[idxchr&((idxn&idxed)|(idxp&idxst))&idxcat&(ov['cat']=='3')] .copy()
# se cat='s' and chr,
self.es = es = ov[idxchr&(ov['cat']=='s')&idxcat].copy()
        # allow overlap to other categories
self.e5b = e5b = ov[idxchr&((idxp&idxed)|(idxn&idxst))&(ov['cat']=='5')].copy()
# 3' cat='3' and chr,acceptor (+,st)|(-,ed) match
self.e3b = e3b = ov[idxchr&((idxn&idxed)|(idxp&idxst))&(ov['cat']=='3')] .copy()
# se cat='s' and chr,
self.esb = esb = ov[idxchr&(ov['cat']=='s')].copy()
# splice junction
self.s1 = s1 = en1.model('sj')
self.s2 = s2 = en2.model('sj')
jcntname = self.colname('jcnt')
l2c = UT.df2dict(s2, 'locus',jcntname)
jhitname = self.colname2('jhit', en2.code)
s1[jhitname] = [l2c.get(x,0) for x in s1['locus']] # corresponding s2 count
self.sj= sj = s1[s1[jhitname]>0].copy() # only consider s2 count > 0
# for batch processing
self.e = {'i':ei,'5':e5,'3':e3,'s':es, 'j':sj, '5b':e5b, '3b':e3b, 'sb':esb}
def _calc_binned(self,x0,y0,binsize):
avgx,avgy,minx,maxx,cnt = UT.calc_binned(x0, y0, num=binsize, returnminmax=True)
LOG.debug('len(avgx)={0},len(avgy)={1},len(minx)={2},len(maxx)={3}'.
format(len(avgx),len(avgy),len(minx),len(maxx)))
avgy1 = N.concatenate([avgy,[0]])
delta = maxx - minx
hight = (avgy+avgy1[1:])/2.
if len(maxx)>0:
auc = N.sum(delta*hight)/(maxx[0]-minx[-1])
else:
auc = 0.
LOG.debug('len(x0)={0},len(y0)={1}, auc={2:.3f}'.format(len(x0),len(y0),auc))
return auc,maxx,avgy,x0,y0
def calc_stats(self):
ecovname = self.colname('ecov')
jcntname = self.colname('jcnt')
jhitname = self.colname2('jhit', self.en2.code)
def _findclosest(e, which):
e['dlen'] = N.abs(e['len']-e['b_len'].astype(float))
e['ratio'] = e['b_len'].astype(float)/e['len']
e = e.sort_values(['_id','dlen'],ascending=True)
f = e.groupby('_id',sort=False).first().reset_index()
self.closest[which] = f
return f
def _count(dw, da1, da2, which):
if which != 'j':
da1 = da1[da1[ecovname]>0]
dw = dw[dw[ecovname]>0]
#da2 = da2[da2[ecovname]>0]
else:
da1 = da1[da1[jcntname]>0]
dw = dw[dw[jcntname]>0]
#da2 = da2[da2[jcntname]>0]
pop = set(da1['_id'].values)
hit = set(dw['_id'].values)
pop2 = set(da2['_id'].values)
#dif = pop.difference(hit)
if len(pop)==0:
LOG.warning('no elements in {0} for population1'.format(self.abbr[which]))
if len(pop2)==0:
LOG.warning('no elements in {0} for population2'.format(self.abbr[which]))
if len(hit)==0:
LOG.warning('no elements in {0} for match'.format(self.abbr[which]))
np1,nh,np2=len(pop),len(hit),len(pop2)
r1 = float(nh)/max(1,np1)
r2 = float(nh)/max(1,np2)
LOG.info( '[{5}] detected1:{0},\tmatched:{1},\t(detected2:{2}),\tratio:{3:.2f},\t(ratio2:{4:.2f})'.
format(np1,nh,np2,r1,r2, which) )
#return hit, pop, pop2
return nh,np1,np2
for which in ['i','5','3','s','j','5b','3b','sb']:
LOG.debug(which+'='*10)
cn = 'hit{0}'.format(which)
if which != 'j':
e1,e2 = self.e1,self.e2
# use exons with reads
ea1 = e1[(e1['cat']==which[0])][['_id',ecovname,'name']].copy() # all exons
if len(which)==1:
ea2 = e2[(e2['cat']==which[0])]
else: # all of exons allowed
ea2 = e2
ew = self.e[which] # matched exons
hit, pop, pop2 = _count(ew, ea1, ea2, which)
ew2 = _findclosest(ew, which) # calculate ratio
i2r = UT.df2dict(ew2,'_id','ratio')
ea1[cn] = [i2r.get(x,0) for x in ea1['_id']]
ea1 = ea1.set_index('_id')
x = N.log2(ea1[ecovname]+1) # log coverage
y = ea1[cn]
ns = ea1['name']
else:
sa = self.s1
hit, pop, pop2 = _count(self.e['j'], sa, self.s2, which)
sa[cn] = [1 if x>0 else 0 for x in sa[jhitname]] # in case of NaN
sa = sa.set_index('_id')
x = N.log2(sa[jcntname]+1)
y = sa[cn]
ns = sa['name']
# gen4 ecov>0, detected or not
# if which != 'j':
# idx2 = x>0
# x2 = x[idx2].values
# y4 = N.array(y[idx2]>0, dtype=int)
# else:
# x2 = x.values
# y4 = N.array(y>0, dtype=int)
# only consider ones detected in the reference (en1)
idx2 = x>0
x2 = x[idx2].values
y4 = N.array(y[idx2]>0, dtype=int) # binary detection indicator (ratio>0)
try:
x3,y3,xth = UT.fit_sigmoid(x2,y4,(0,5),0.99)
except:
xth = N.NaN
auc4,maxx4,avgy4,x4,y4 = self._calc_binned(x2,y4,self.binsize)
p1 = float(hit)/pop if pop>0 else 0.
p2 = float(hit)/pop2 if pop2>0 else 0.
self.ratios[which] = PD.DataFrame({'x':x, 'y':y, 'name':ns})
self.stats[which] = {'detected1':pop, # int
'matched':hit, # int
'detected2':pop2, # int
'p1':p1, # float
'p2':p2, # float
'auc':auc4, # float
'maxx':list(maxx4), # list
'avgy':list(avgy4), # list
'xth':xth, # float
}
# Not implemented yet:
# (4. ELC: exon length completeness = max(ratio of exon length covered by overlapping target gene))
# use ci overlaps
def calc_completeness(self):
"""Completeness measures how much of the reference gene structure is recovered.
1. GLC: gene length completeness = max(ratio of gene length covered by overlapping target gene)
2. ECC: exon count completeness = max(ratio of overlapping exon counts)
3. JCC: junction count completeness = max(ratio of overlapping junction counts)
"""
ov = self.ov # all
if self.exclude_se_from_completeness:
ov = ov[ov['cat']!='s']
# actual overlap with correct strand
ov2 = ov[(ov['b__gidx']!='.')&((ov['strand']==ov['b_strand'])|(ov['b_strand']=='.'))]
if self.exclude_se_from_completeness:
ov2 = ov2[ov2['b_cat']!='s']
gcovname = self.colname('gcov')
g2gcov = UT.df2dict(self.e1, '_gidx', gcovname)
xlim = [0,6]
# GLC
g1 = ov.groupby('_gidx')
glc = (g1['ed'].max()-g1['st'].min()).to_frame('glen')
g2 = ov2.groupby(['_gidx','b__gidx'])
gl2 = (g2['ed'].max()-g2['st'].min()).to_frame('b_glen').reset_index()
gl2 = gl2.groupby('_gidx')['b_glen'].max()
g2gl2 = UT.series2dict(gl2)
glc['b_glen'] = [g2gl2.get(x,0) for x in glc.index]
glc['y'] = glc['b_glen']/glc['glen']
glc['x'] = N.log2(N.array([g2gcov[x] for x in glc.index])+1.)
self.ratios['glc'] = glc[['x','y']]
x,y = glc['x'].values,glc['y'].values
x2,y2,xth = UT.fit_sigmoid(x,y,xlim,0.99)
auc,maxx,avgy,x,y = self._calc_binned(x,y,self.binsize)
self.stats['glc'] = {'p1':N.sum(glc['b_glen']>0)/float(len(glc)), # float ratio detected
'auc':auc, # float
'maxx':list(maxx), # list
'avgy':list(avgy), # list
'xth':xth, # float
}
# ECC
ecc = ov.groupby(['_gidx','_id']).first().reset_index().groupby('_gidx').size().to_frame('#exons')
ec2 = ov2.groupby(['_gidx','b__gidx','_id']).first().reset_index()
ec2 = ec2.groupby(['_gidx','b__gidx']).size().to_frame('ec').reset_index()
ec2 = ec2.groupby('_gidx')['ec'].max()
g2ec2 = UT.series2dict(ec2)
ecc['b_#exons'] = [g2ec2.get(x,0) for x in ecc.index]
ecc['y'] = ecc['b_#exons']/ecc['#exons']
ecc['x'] = N.log2(N.array([g2gcov[x] for x in ecc.index])+1.)
self.ratios['ecc'] = ecc[['x','y']]
x,y = ecc['x'].values,ecc['y'].values
x2,y2,xth = UT.fit_sigmoid(x,y,xlim,0.99)
auc,maxx,avgy,x,y = self._calc_binned(x,y,self.binsize)
self.stats['ecc'] = {'p1':N.sum(ecc['b_#exons']>0)/float(len(ecc)),
'auc':auc,
'maxx':list(maxx),
'avgy':list(avgy),
'xth':xth}
# JCC
s1 = self.s1
jcc = s1.groupby('_gidx').size().to_frame('jc')
if '_gidx' not in self.s2: # adapt to old version where sj.txt.gz did not contain _gidx
a2g = UT.df2dict(self.e2, 'a_id','_gidx')
d2g = UT.df2dict(self.e2, 'd_id','_gidx')
self.s2['_gidx'] = [a2g.get(x,d2g.get(y,0)) for x,y in self.s2[['a_id','d_id']].values]
l2g2 = UT.df2dict(self.s2, 'locus', '_gidx')
s1['b__gidx'] = [l2g2.get(x,'.') for x in s1['locus'].values]
s1o = s1[s1['b__gidx']!='.'] # overlapping
jc2 = s1o.groupby(['_gidx','b__gidx']).size().to_frame('jc2').reset_index()
jc2 = jc2.groupby('_gidx')['jc2'].max()
g2jc2 = UT.series2dict(jc2)
jcc['b_jc'] = [g2jc2.get(x,0) for x in jcc.index]
jcc['y'] = jcc['b_jc']/jcc['jc']
jcc['x'] = N.log2(N.array([g2gcov[x] for x in jcc.index])+1.)
self.ratios['jcc'] = jcc[['x','y']]
x,y = jcc['x'].values,jcc['y'].values
x2,y2,xth = UT.fit_sigmoid(x,y,xlim,0.99)
auc,maxx,avgy,x,y = self._calc_binned(x,y,self.binsize)
self.stats['jcc'] = {'p1':N.sum(jcc['b_jc']>0)/float(len(jcc)),
'auc':auc,
'maxx':list(maxx),
'avgy':list(avgy),
'xth':xth}
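    # Hypothetical usage sketch (commented out; not part of the original
    # module). After calc_completeness() has filled self.stats, the per-measure
    # AUC and detection ratio could be collected into a small DataFrame for a
    # quick comparison; `comp` below stands for an instance of this class:
    #
    #   comp.calc_completeness()
    #   summary = PD.DataFrame({k: {'auc': comp.stats[k]['auc'],
    #                               'p1': comp.stats[k]['p1']}
    #                           for k in ['glc', 'ecc', 'jcc']}).T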
def _plot(self, x, y, ax, ca='go-', cf='r.-', cd='b.',pw='dfat',color=None,
binsize=25,xlim=(0,7),yth=0.99,scale=100,label='', alpha=0.1, which=None):
"""Plot dots or sigmoid fit or binned average.
Args:
x,y: data points, y should be in the range [0,1]
scale: scale factor for y, default 100, i.e. [0,1]=>[0,100]
ax: Axes object
pw: code to indicate what to plot d:dot, f:sigmoid fit,
            a:binned average, t:sigmoid threshold, default 'dfat'
cd: color for dot
cf: color for sigmoid fit
ca: color for binned average
binsize: for binned average
        xlim: x limit, default (0,7)
yth: Y threshold for sigmoid fit, xth is calculated and indicated (if 't' in pw)
"""
if 'f' in pw or ('t' in pw and which is None):
x2,y2,xth = UT.fit_sigmoid(x,y,xlim,yth)
if 'd' in pw: # dot
ax.plot(x,scale*y,'b.', alpha=alpha, label=label)
if 'f' in pw: # fit
ax.plot(x2,scale*y2,cf, label=label)
if 'a' in pw: # avg
if which is None:
auc,maxx,avgy,x,y = self._calc_binned(x,y,self.binsize)
#avgx,avgy = UT.calc_binned(x,y,num=binsize)
else:
st = self.stats[which]
maxx,avgy = N.array(st['maxx']),N.array(st['avgy'])
if color is None:
ax.plot(maxx,scale*avgy, ca, label=label)
else:
ax.plot(maxx,scale*avgy, ca, label=label, color=color)
if 't' in pw: # threshold
if which is not None:
xth = self.stats[which]['xth']
if xth < xlim[1]:
ax.plot([xth,xth],[0,scale],cf+'-')
ax.text(xth, 10, '{0:.2f}'.format(xth))
ax.set_xlim([-0.5,xlim[1]])
ax.set_ylim([-5,105])
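    # Hypothetical sketch (commented out; `comp` stands for an instance with
    # stats and ratios already computed). Drawing dots, binned averages and the
    # fitted threshold for one category could look like:
    #
    #   fig, ax = P.subplots(1, 1)
    #   xy = comp.ratios['5']
    #   comp._plot(xy['x'].values, xy['y'].values, ax, pw='dat', which='5')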
def get_detection_percentages(self):
"""Makes a dataframe containing detection percentages. """
st = self.stats
order = ['i','5','5b','3','3b','s','sb','j']#,'glc','ecc','jcc']
dp1 = {k: 100.*st[k]['p1'] for k in order}
dp2 = {k: 100.*st[k]['p2'] for k in order}
df = PD.DataFrame({'%detected 1':dp1, '%detected 2':dp2})
        return df.loc[order]
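    # Hypothetical usage sketch (commented out): the returned frame is indexed
    # by element category, so both detection ratios can be drawn directly with
    # pandas, e.g.:
    #
    #   df = comp.get_detection_percentages()  # comp: instance of this class
    #   df.plot(kind='bar')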
def get_element_counts(self, sj, ex):
"""Makes a dataframe containing counts of elements."""
cnts = {}
seidx = ex['cat']=='s'
cnts['#se'] = N.sum(seidx)
cnts['#me'] = len(ex)-cnts['#se']
# ng_se = len(set(ex[seidx]['_gidx'].values))
# assert(ng_se == cnts['#se'])
cnts['#megenes'] = len(set(ex[ex['cat']!='s']['_gidx'].values))
cnts['#genes'] = len(set(ex['_gidx'].values))
cnts['#j'] = len(sj)
return PD.DataFrame(cnts, index=['counts']).T
def plot_detection(self, ax=None, w1=['i','5','3','s','j'],w2=[0]):
"""Make bar graphs of detection percentages.
Returns:
Axes object
"""
if ax is None:
fig, ax = P.subplots(1,1,figsize=(3,3))
df = self.get_detection_percentages()
w2 = [df.columns[x] for x in w2]
        ax = df.loc[w1, w2].plot(kind='bar', legend=False, ax=ax)
st = self.stats
        ax.set_title('{0}/{1}'.format(st['code1'],st['code2']))
        return ax
def plot_sensitivity(self, color='b', ptyp='.-', ypos=0, xpos=0, axr=None, lineonly=False, ws = WSDEFAULT):
st = self.stats
p1c = st['code1'] # gen4
p2c = st['code2']
def _plot_one(ax, which, label, color, ptyp, ypos=0, xpos=0):
s = self.stats[which]
x = N.concatenate([N.array(s['maxx']),[0]])
y = N.concatenate([100*N.array(s['avgy']),[0]])
# ax.plot(s['maxx'],100*N.array(s['avgy']),color+'.-',ms=5, label=label)
ax.plot(x,y,ptyp,ms=5, label=label,color=color)
ma = N.ceil(N.max(s['maxx']))+0.5
ax.set_xlim([-0.5,ma])
ax.text(0.25+0.35*xpos,0.07*(1+ypos),'{0}: {1:.2f}'.format(label,s['auc']),
transform=ax.transAxes)
if axr is None:
fig,axr = P.subplots(1,len(ws),figsize=(3*len(ws),3),sharey=True)
P.subplots_adjust(wspace=0.07, top=0.85)
else:
assert len(axr)==len(ws)
fig = None
for i,w in enumerate(ws):
ax = axr[i]
if isinstance(w, tuple):
_plot_one(ax, w[0], p2c, color,ptyp, ypos, 0)
_plot_one(ax, w[1], '- -', color,ptyp+'-', ypos, 1)
w = w[0]
else:
_plot_one(ax, w, p2c, color,ptyp, ypos)
if not lineonly:
ax.set_title(self.abbr[w])
if w!='j':
ax.set_xlabel('log2({0}.{1}_ecov+1)'.format(p1c, self.datacode))
else:
ax.set_xlabel('log2({0}.{1}_jcnt+1)'.format(p1c, self.datacode))
if not lineonly:
axr[0].set_ylim([-5,105])
axr[0].set_ylabel('%detected')
axr[len(ws)-1].legend(loc='center left', bbox_to_anchor=(1.0,0.5))
if fig is not None:
fig.suptitle('{1}/{0}'.format(p1c,p2c))
return axr
def plot_ratio(self,axr=None,plotxlabel=True,label='',disp='both', xlim=(0,25), ylim=(0.01,1000), alpha=0.1, ms=1):
"""Plot length ratios of best matching exons """
st = self.stats
p1c = st['code1'] # gen4
p2c = st['code2']
tgts = ['5','3','s']
if axr is None:
fig,axr = P.subplots(1,len(tgts),figsize=(3*len(tgts),3),sharex=True,sharey=True)
P.subplots_adjust(wspace=0.07,hspace=0.15,top=0.85)
else:
fig = None
for i,w in enumerate(tgts):
ax = axr[i]
#pop,hit,dif,auc,maxx,avgy,x,y = self.stats[w]
st = self.stats[w]
auc,maxx,avgy= st['auc'],N.array(st['maxx']),N.array(st['avgy'])
xy = self.ratios[w]
x = xy['x'].values
y = xy['y'].values
if disp!='pdf':
if w=='s':
ax.plot(x,y,'.',ms=ms, alpha=min(1,3*alpha))
else:
ax.plot(x,y,'.',ms=ms, alpha=alpha)
#ax.plot(maxx,avgy,'ro-',ms=3,alpha=0.3)
ax.set_yscale('log')
ax.set_ylim(ylim)
ax.set_xlim(xlim)
if disp!='png':
if i==0:
ax.set_ylabel('{1}_len/{0}_len+1'.format(p1c,p2c))
if plotxlabel:
ax.set_xlabel('log2({0}.{1}_ecov+1)'.format(p1c, self.datacode))
ax.set_title(label+self.abbr[w])
m = 10**(N.nanmean(N.log10(y[(x>0)&(y>0)])))
ax.text(5,10**2,'avg:{0:.2f}'.format(m))
else:
ax.set_yticks([])
ax.set_xticks([])
if fig is not None:
fig.suptitle('{1}/{0}'.format(p1c,p2c))
return axr
def plot_completeness(self, axr=None, tgts=['glc','ecc','jcc'], pw='dft', disp='both',
title=None, xlim=[0,15], alpha=0.1, **kw):
st = self.stats
p1c = st['code1'] # gen4
p2c = st['code2']
if axr is None:
fig,axr = P.subplots(1,len(tgts),figsize=(3*len(tgts),3),sharex=False,sharey=True)
P.subplots_adjust(wspace=0.07,hspace=0.15,top=0.85)
else:
fig = None
for i, w in enumerate(tgts):
ax = axr[i]
d = self.ratios[w]
x = d['x'].values
y = d['y'].values
self._plot(x,y,ax,pw=pw,scale=100, which=w,xlim=xlim,**kw)
if disp!='png':
if i==0:
ax.set_ylabel('% covered')
if (fig is not None and i==1):
ax.set_xlabel('log2({0}.{1}_gcov+1)'.format(p1c, self.datacode))
ax.set_title(w.upper())
else:
ax.set_yticks([])
ax.set_xticks([])
ax.locator_params(axis='x', nbins=4)
if fig is not None:
if title is None:
title = '{1}/{0}'.format(p1c,p2c)
fig.suptitle(title)
return axr
def plot_elen_vs_tlen_gtf(gtf, ax=None, ms=1, alpha=0.1, title=''):
gtf['tlen'] = gtf['ed']-gtf['st']
tr = gtf[gtf['typ']=='transcript'][['transcript_id','tlen']].copy().set_index('transcript_id')
exons = gtf[gtf['typ']=='exon']
tr['elen'] = exons.groupby('transcript_id')['tlen'].sum()
return _plot_evt(tr,ax,ms,alpha,title)
def _plot_evt(df,ax=None, ms=1, alpha=0.1, title=''):
if ax is None:
fig,ax = P.subplots(1,1,figsize=(4,4))
x = N.log10(df['tlen'])
y = N.log10(df['elen'])
ax.set_xlabel('log10(tlen)')
ax.set_ylabel('log10(elen)')
ax.set_title(title)
ax.plot(x,y,'.',ms=ms,alpha=alpha)
ax.set_xlim([2,6.5])
ax.set_ylim([1,5.5])
ax.locator_params(nbins=5)
return ax
def plot_elen_vs_tlen_bed12(bed, ax=None, ms=1, alpha=0.1, title=''):
bed['tlen'] = bed['ed']-bed['st']
bed['elen'] = bed['esizes'].apply(lambda x: N.sum([int(y) for y in x.split(',')[:-1]]))
return _plot_evt(bed,ax,ms,alpha,title)
| mit |
stinebuu/nest-simulator | doc/userdoc/guides/spatial/user_manual_scripts/layers.py | 17 | 11076 | # -*- coding: utf-8 -*-
#
# layers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Run as python3 layers.py > layers.log
import matplotlib.pyplot as plt
import nest
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(1234567)
def beautify_layer(layer, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
ctr = layer.spatial['center']
ext = layer.spatial['extent']
if xticks is None:
if 'shape' in layer.spatial:
dx = float(ext[0]) / layer.spatial['shape'][0]
dy = float(ext[1]) / layer.spatial['shape'][1]
xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
layer.spatial['shape'][0])
yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
layer.spatial['shape'][1])
if xlim is None:
xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
0] / 2. + dx / 2.] # extra space so extent is visible
ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
else:
ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_axisbelow(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
# --------------------------------------------------
nest.ResetKernel()
#{ layer1 #}
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[5, 5]))
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
tx.append(ax.text(0.65, 0.4 - r * 0.2, str(r),
horizontalalignment='center',
verticalalignment='center'))
tx.append(ax.text(-0.4 + r * 0.2, 0.65, str(r),
horizontalalignment='center',
verticalalignment='center'))
# For bbox_extra_artists, see
# https://github.com/matplotlib/matplotlib/issues/351
# plt.savefig('../user_manual_figures/layer1.png', bbox_inches='tight',
# bbox_extra_artists=tx)
print("#{ layer1s.log #}")
#{ layer1s #}
print(layer.spatial)
#{ end #}
print("#{ end.log #}")
print("#{ layer1p.log #}")
#{ layer1p #}
nest.PrintNodes()
#{ end #}
print("#{ end.log #}")
# --------------------------------------------------
nest.ResetKernel()
#{ layer2 #}
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[5, 5],
extent=[2.0, 0.5]))
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
tx.append(fig.gca().text(1.25, 0.2 - r * 0.1, str(r),
horizontalalignment='center',
verticalalignment='center'))
tx.append(fig.gca().text(-0.8 + r * 0.4, 0.35, str(r),
horizontalalignment='center',
verticalalignment='center'))
# See https://github.com/matplotlib/matplotlib/issues/351
plt.savefig('../user_manual_figures/layer2.png', bbox_inches='tight',
bbox_extra_artists=tx)
# --------------------------------------------------
nest.ResetKernel()
#{ layer3 #}
layer1 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[5, 5]))
layer2 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[5, 5],
center=[-1., 1.]))
layer3 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[5, 5],
center=[1.5, 0.5]))
#{ end #}
fig = nest.PlotLayer(layer1, nodesize=50)
nest.PlotLayer(layer2, nodesize=50, nodecolor='g', fig=fig)
nest.PlotLayer(layer3, nodesize=50, nodecolor='r', fig=fig)
beautify_layer(layer1, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-1.6, 2.1], ylim=[-0.6, 1.6],
xticks=np.arange(-1.4, 2.05, 0.2),
yticks=np.arange(-0.4, 1.45, 0.2))
plt.savefig('../user_manual_figures/layer3.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer3a #}
nx, ny = 5, 3
d = 0.1
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[nx, ny],
extent=[nx * d, ny * d],
center=[nx * d / 2., 0.]))
#{ end #}
fig = nest.PlotLayer(layer, nodesize=100)
plt.plot(0, 0, 'x', markersize=20, c='k', mew=3)
plt.plot(nx * d / 2, 0, 'o', markersize=20, c='k', mew=3, mfc='none',
zorder=100)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xticks=np.arange(0., 0.501, 0.05),
yticks=np.arange(-0.15, 0.151, 0.05),
xlim=[-0.05, 0.55], ylim=[-0.2, 0.2])
plt.savefig('../user_manual_figures/layer3a.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4 #}
pos = nest.spatial.free(pos=nest.random.uniform(min=-0.5, max=0.5),
num_dimensions=2)
layer = nest.Create('iaf_psc_alpha', 50,
positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-0.55, 0.55], ylim=[-0.55, 0.55],
xticks=[-0.5, 0., 0.5], yticks=[-0.5, 0., 0.5])
plt.savefig('../user_manual_figures/layer4.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4b #}
pos = nest.spatial.free(pos=[[-0.5, -0.5], [-0.25, -0.25], [0.75, 0.75]])
layer = nest.Create('iaf_psc_alpha', positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-0.55, 0.80], ylim=[-0.55, 0.80],
xticks=[-0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.],
yticks=[-0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.])
plt.savefig('../user_manual_figures/layer4b.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4_3d #}
pos = nest.spatial.free(nest.random.uniform(min=-0.5, max=0.5),
num_dimensions=3)
layer = nest.Create('iaf_psc_alpha', 200, positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4_3d_b #}
pos = nest.spatial.grid(shape=[4, 5, 6])
layer = nest.Create('iaf_psc_alpha', positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d_b.png', bbox_inches='tight')
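# Commented sketch (not part of the original script): the positions of the
# first few nodes can be inspected with nest.GetPosition, assuming a NEST 3.x
# style NodeCollection as created above:
# print(nest.GetPosition(layer[:3]))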
# --------------------------------------------------
nest.ResetKernel()
#{ player #}
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[5, 1],
extent=[5., 1.],
edge_wrap=True))
#{ end #}
# fake plot with layer on line and circle
clist = [(0, 0, 1), (0.35, 0, 1), (0.6, 0, 1), (0.8, 0, 1), (1.0, 0, 1)]
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1.scatter(range(1, 6), [0] * 5, s=200, c=clist)
ax1.set_xlim([0, 6])
ax1.set_ylim([-0.5, 1.25])
ax1.set_aspect('equal', 'box')
ax1.set_xticks([])
ax1.set_yticks([])
for j in range(1, 6):
ax1.text(j, 0.5, str('(%d,0)' % (j - 3)),
horizontalalignment='center', verticalalignment='bottom')
ax1a = fig.add_subplot(223)
ax1a.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1a.scatter(range(1, 6), [0] * 5, s=200,
c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax1a.set_xlim([0, 6])
ax1a.set_ylim([-0.5, 1.25])
ax1a.set_aspect('equal', 'box')
ax1a.set_xticks([])
ax1a.set_yticks([])
for j in range(1, 6):
ax1a.text(j, 0.5, str('(%d,0)' % (j - 3)),
horizontalalignment='center', verticalalignment='bottom')
ax2 = fig.add_subplot(122)
phic = np.arange(0., 2 * np.pi + 0.5, 0.1)
r = 5. / (2 * np.pi)
ax2.plot(r * np.cos(phic), r * np.sin(phic), 'k-', lw=2)
phin = np.arange(0., 4.1, 1.) * 2 * np.pi / 5
ax2.scatter(r * np.sin(phin), r * np.cos(phin), s=200,
c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax2.set_xlim([-1.3, 1.3])
ax2.set_ylim([-1.2, 1.2])
ax2.set_aspect('equal', 'box')
ax2.set_xticks([])
ax2.set_yticks([])
for j in range(5):
ax2.text(1.4 * r * np.sin(phin[j]), 1.4 * r * np.cos(phin[j]),
str('(%d,0)' % (j + 1 - 3)),
horizontalalignment='center', verticalalignment='center')
plt.savefig('../user_manual_figures/player.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer6 #}
layer1 = nest.Create('iaf_cond_alpha',
positions=nest.spatial.grid(shape=[2, 1]))
layer2 = nest.Create('poisson_generator',
positions=nest.spatial.grid(shape=[2, 1]))
#{ end #}
print("#{ layer6 #}")
nest.PrintNodes()
print("#{ end #}")
# --------------------------------------------------
nest.ResetKernel()
#{ vislayer #}
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[21, 21]))
probability_param = nest.spatial_distributions.gaussian(nest.spatial.distance, std=0.15)
conndict = {'rule': 'pairwise_bernoulli',
'p': probability_param,
'mask': {'circular': {'radius': 0.4}}}
nest.Connect(layer, layer, conndict)
fig = nest.PlotLayer(layer, nodesize=80)
ctr = nest.FindCenterElement(layer)
nest.PlotTargets(ctr, layer, fig=fig,
mask=conndict['mask'], probability_parameter=probability_param,
src_size=250, tgt_color='red', tgt_size=20, mask_color='red',
probability_cmap='Greens')
#{ end #}
plt.savefig('../user_manual_figures/vislayer.png', bbox_inches='tight')
| gpl-2.0 |
AnishShah/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 27 | 46439 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes([axis] + list(
labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(
array_ops.gather(transposed.tensor, indexer), temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types,
tc.Union(slice, collections.Hashable, list)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
# Needs to be after checking for slices, since slice objects claim to be
# instances of collections.Hashable but hash() on them fails.
elif isinstance(value, collections.Hashable):
slices[axis_name] = axis.index(value)
elif isinstance(value, list):
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value]
indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
else:
# If type checking is working properly, this shouldn't be possible.
raise TypeError('cannot handle arbitrary types')
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
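# Hypothetical usage sketch, kept as a comment so import-time behaviour is
# unchanged; the axis names and labels below are illustrative only:
#
#   lt = core.LabeledTensor(array_ops.ones((3, 2)),
#                           [('x', ['a', 'b', 'c']), ('y', [0, 1])])
#   select(lt, {'x': 'b'})              # select a single label
#   select(lt, {'x': slice('a', 'b')})  # labeled slices include both bounds
#   select(lt, {'x': ['a', 'c']})       # a single list selection is allowed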
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
    ValueError: If fewer than one tensor input is provided, if the tensors
have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
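# Hypothetical sketch (commented; names are illustrative only): two tensors
# sharing a 'channel' axis can be concatenated along 'time', and the 'time'
# labels are concatenated as well:
#
#   a = core.LabeledTensor(array_ops.zeros((2, 3)),
#                          [('time', [0, 1]), ('channel', ['c1', 'c2', 'c3'])])
#   b = core.LabeledTensor(array_ops.zeros((1, 3)),
#                          [('time', [2]), ('channel', ['c1', 'c2', 'c3'])])
#   ab = concat([a, b], 'time')  # 'time' labels become [0, 1, 2]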
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
    ValueError: If fewer than one input tensor is provided, or if the tensors
don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
      # produced with the wrong order. (The latter will occur with some frequency
      # because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r is not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(
labeled_tensor.tensor, shape, seed=seed, name=scope)
return core.LabeledTensor(crop_op, axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
# TODO(ericmc): Fix this upstream.
if labeled_tensor.dtype == dtypes.string:
# We must construct the full graph here, because functional_ops.map_fn
# doesn't work for string-valued tensors.
# Constructing the full graph may be slow.
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
else:
# Figure out what the axis labels should be, but use tf.map_fn to
# construct the graph because it's efficient.
# It may be slow to construct the full graph, so we infer the labels from
# the first element.
# TODO(ericmc): This builds a subgraph which then gets thrown away.
# Find a more elegant solution.
first_map_lt = fn(unpack_lts[0])
final_axes = list(labeled_tensor.axes.values())[:1] + list(
first_map_lt.axes.values())
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor)
def tf_fn(tensor):
original_axes = list(labeled_tensor.axes.values())[1:]
tensor_lt = core.LabeledTensor(tensor, original_axes)
return fn(tensor_lt).tensor
map_op = functional_ops.map_fn(
tf_fn, labeled_tensor.tensor, dtype=first_map_lt.dtype)
map_lt = core.LabeledTensor(map_op, final_axes)
return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
"""Left fold on the list of tensors unpacked from labeled_tensor.
See tf.foldl.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
Its arguments are (accumulated_value, next_value).
labeled_tensor: The input tensor.
initial_value: The initial value of the accumulator.
name: Optional op name.
Returns:
The accumulated value.
"""
with ops.name_scope(name, 'lt_foldl',
[labeled_tensor, initial_value]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
initial_value = core.convert_to_labeled_tensor(initial_value)
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor, ops.Tensor)
def tf_fn(accumulator, next_element):
accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
next_element_lt = core.LabeledTensor(
next_element, list(labeled_tensor.axes.values())[1:])
return fn(accumulator_lt, next_element_lt).tensor
foldl_op = functional_ops.foldl(
tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
return core.identity(foldl_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(
labeled_tensor.tensor, squeeze_dimensions, name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
# pylint: disable=invalid-name
ReduceAxis = tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
We intend to eventually support `matmul` on higher rank input, and also
eventually support summing over any number shared dimensions (via an `axis`
argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between '
'input to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [
name for name in axis_scope_order if name in result_axis_names
]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(
a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
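# Hypothetical sketch (commented; axis names and sizes are illustrative only):
# matmul sums over the single shared axis, so a ('x', 'y') matrix times a
# ('y',) vector yields a ('x',) result:
#
#   m = core.LabeledTensor(array_ops.ones((2, 3)), [('x', 2), ('y', 3)])
#   v = core.LabeledTensor(array_ops.ones((3,)), [('y', 3)])
#   xv = matmul(m, v)  # axes: ('x',)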
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Axes must all be strings, in which case those dimensions will be
removed, or pairs of (name, None) or (name, label), in which case those
dimensions will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(
labeled_tensor.tensor, reduction_dimensions, keepdims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
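# Hypothetical sketch (commented; axis names are illustrative only): the
# generated reducers take axis names to squeeze out, or (name, label) pairs to
# keep with the given label:
#
#   lt = core.LabeledTensor(array_ops.ones((2, 3)), [('x', 2), ('y', 3)])
#   reduce_sum(lt, 'y')  # result has axes ('x',)
#   reduce_sum(lt)       # reduces over all axes, yielding a scalar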
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
integer number of times to tile along that axis. Only axes with a multiple
      other than 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [
name for name in multiples
if labeled_tensor.axes[name].labels is not None
]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [
axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()
]
return core.LabeledTensor(tile_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(labeled_tensor.tensor,
padding_pairs,
mode,
name=scope)
return core.LabeledTensor(pad_op, new_axes)
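# Hypothetical sketch (commented; the axis name and labels are illustrative
# only): padding a length-3 'x' axis with one element before and after yields
# a length-5 'x' axis:
#
#   lt = core.LabeledTensor(array_ops.ones((3,)), [('x', ['a', 'b', 'c'])])
#   padded = pad(lt, {'x': (1, 1)})  # 'x' axis now has length 5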
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(
tc.Union(core.Axes, tc.Collection(
tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
    The LabeledTensor with the given constant value.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(
labeled_tensor.tensor, msg=message, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
axis are removed, because True indices in `mask` may not be known dynamically.
Args:
labeled_tensor: The input tensor.
    mask: A 1-D boolean LabeledTensor whose single axis matches the first axis
      of `labeled_tensor`.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
    ValueError: if the first axis of the mask does not match the first axis of
      `labeled_tensor`.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.contrib.util.constant_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
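# Usage sketch (hypothetical tensors): with `lt` having axes [('x', 3), ('y', 2)]
# and `mask` a 1-D boolean LabeledTensor over axis 'x', boolean_mask(lt, mask)
# keeps the 'x' rows where the mask is True; the result's first axis keeps only
# the name 'x', since the number of True entries may not be known statically.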
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
    ValueError: if `condition`, `x` and `y` do not all have exactly the same axes.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
| apache-2.0 |
ky822/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
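# Note (assumed rationale): Lasso's coordinate descent iterates over features,
# so the column-compressed (CSC) layout chosen above keeps per-feature column
# slices cheap; the example itself only requires some sparse input format.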
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
IsacLira/data-science-cookbook | 2016/recommendation-systems/exercise_utils.py | 2 | 1251 | from math import sqrt
import pandas as pd
def get_users_interests():
return pd.read_csv('interest.csv', sep=',', header=None, index_col=0).values
def get_users_interests_poor():
return [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
def print_matrix(m):
for r in m:
print r | mit |
FiratGundogdu/dagitik | odev01/odev01.py | 1 | 1431 |
import numpy
import matplotlib.pyplot as plt
m1 = -3
s1 = 1
m2 = 5
s2 = 0.5
print "mu 1 = " , m1
print "sigma 1 = " , s1
print "mu 2 = " , m2
print "sigma 2 = " , s2
a1 = numpy.random.normal(m1,s1,10000)
a2 = numpy.random.normal(m2,s2,10000)
print "random array1= ",a1
print "random array2= ",a2
count1 = 0
count2 = 0
h1 = [0.0]*41 #histogram1
h2 = [0.0]*41 #histogram2
# bin the samples into the histograms
for i in range(len(a1)):
a1[i]=round(a1[i])
if a1[i]<=20 and a1[i]>=-20:
count1+=1
h1[int(a1[i])+20]+=1
for n in range(len(a2)):
a2[n]=round(a2[n])
if a2[n]<=20 and a2[n]>=-20:
count2+=1
h2[int(a2[n])+20]+=1
print "histogram1= ", h1
print "histogram2= ", h2
# normalization
for x in range(len(h1)):
h1[x]=h1[x]/count1
for y in range(len(h2)):
h2[y]=h2[y]/count2
print "normalize histogram1= ",h1
print "normalize histogram2=", h2
#plot
plt.axis((-20,20,0,1))
plt.bar(range(-20,21), h1,color='blue')
plt.bar(range(-20,21), h2,color='red')
plt.show()
#distance
i = 0
j = 0
sum=0
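# Approximate earth mover's distance between the two normalized histograms:
# repeatedly move probability mass between bin i (of h1) and bin j (of h2),
# paying |i - j| per unit of mass moved.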
while(i<41 and j<41):
	if h1[i] == 0:
		i+=1
		continue
	if h2[j] == 0:
		j+=1
		continue
	if h1[i]<h2[j]:
		# move all of h1[i]; subtract it from h2[j] before zeroing h1[i]
		sum=sum+h1[i]*abs(i-j)
		h2[j]=h2[j]-h1[i]
		h1[i]=0
		continue
	if h1[i]>=h2[j]:
		# move h2[j] worth of mass; subtract it from h1[i] before zeroing h2[j]
		sum=sum+h2[j]*abs(i-j)
		h1[i]=h1[i]-h2[j]
		h2[j]=0
		continue
print "distance= ",sum
| gpl-2.0 |
great-expectations/great_expectations | great_expectations/expectations/metrics/table_metrics/table_column_count.py | 1 | 2803 | from typing import Any, Dict, Optional, Tuple
from great_expectations.core import ExpectationConfiguration
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.metric_provider import metric_value
from great_expectations.expectations.metrics.table_metric import TableMetricProvider
from great_expectations.validator.validation_graph import MetricConfiguration
class TableColumnCount(TableMetricProvider):
metric_name = "table.column_count"
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine: "ExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
columns = metrics.get("table.columns")
return len(columns)
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: "ExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
columns = metrics.get("table.columns")
return len(columns)
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: "ExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
columns = metrics.get("table.columns")
return len(columns)
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
table_domain_kwargs: dict = {
k: v
for k, v in metric.metric_domain_kwargs.items()
if k != MetricDomainTypes.COLUMN.value
}
dependencies["table.columns"] = MetricConfiguration(
metric_name="table.columns",
metric_domain_kwargs=table_domain_kwargs,
metric_value_kwargs=None,
metric_dependencies=None,
)
return dependencies
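# Note: the three engine-specific implementations above are identical; the
# engine-specific work happens when the "table.columns" dependency declared
# here is resolved, and the column count is simply len() of that result.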
| apache-2.0 |
lin-credible/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
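# predict() labels inliers as +1 and outliers as -1, so the counts above are
# the training/test points wrongly flagged as outliers and the generated
# abnormal points wrongly accepted as inliers.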
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
iwelland/hop | doc/examples/markovsampling.py | 1 | 1819 | #!/usr/bin/env python
#---------------- EDIT JOB NAME -------------------
#$ -N MCMC
#--------------------------------------------------
#$ -S /usr/bin/python
#$ -v PYTHONPATH=/home/oliver/Library/python-lib
#$ -v LD_LIBRARY_PATH=/opt/intel/cmkl/8.0/lib/32:/opt/intel/itc60/slib:/opt/intel/ipp41/ia32_itanium/sharedlib:/opt/intel/ipp41/ia32_itanium/sharedlib/linux32:/opt/intel/fc/9.0/lib:/opt/intel/cc/9.0/lib
#$ -r n
#$ -j y
# Using the current working directory is IMPORTANT with the default settings for Job()
#$ -cwd
#$ -m e
# $Id$
from staging.SunGridEngine import Job
#------------------------------------------------------------
# EDIT THE inputfiles AND outputfiles DICTIONARIES.
#------------------------------------------------------------
# record input and output files relative to top_dir = cwd
job = Job(variables=dict(state='apo'),
inputfiles=dict(hopgraph='analysis/hopgraph.pickle'
),
outputfiles=dict(scan = 'analysis/pscan.pickle',
occupancy_pdf = 'figs/mcmc_occupancy.pdf',
correl_pdf = 'figs/mcmc_occupancy_correl.pdf',
))
#
#------------------------------------------------------------
job.stage()
F = job.filenames
V = job.variables
# commands
import hop.utilities # must come first (for reasons unknown)
hop.utilities.matplotlib_interactive(False) # no X11 available
import hop.MCMC
from pylab import *
M = hop.MCMC.Pscan(Ntotal=1e7)
M.save(F['scan'])
figure(1)
M.plot_occupancy()
title('occupancy (I-FABP %(state)s)' % V)
savefig(F['occupancy_pdf'])
figure(2)
M.plot_correl()
title('occupancy correlation with MD (I-FABP %(state)s)' % V)
xlim(0,1.05)
ylim(0,1.02)
savefig(F['correl_pdf'])
job.unstage()
job.cleanup()
| lgpl-3.0 |
degoldschmidt/ribeirolab-codeconversion | python/flyPAD/fp_rawtrace.py | 1 | 8337 | #!/usr/bin/env python
"""
Script for extracting & plotting raw capacitance signals from flyPAD data file/s
###
Usage:
- rawtrace inputfile
for plotting from one specified file
- rawtrace inputfile1 inputfile2 ...
for plotting multiple specified files
- rawtrace inputdir
for plotting from all files in a specified directory
- rawtrace
opens gui filedialog for selecting files
"""
# import packages
import os, sys
from tkinter import *
from tkinter import messagebox, filedialog
import json as js
from datetime import datetime as dt
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import numpy as np
from vispy import plot as vp
import scipy as sp
import scipy.signal as sg
from scipy.signal import hilbert
from itertools import groupby
import platform
from helper import *
# metadata
__author__ = "Dennis Goldschmidt"
__copyright__ = "2017"
__credits__ = ["Dennis Goldschmidt"]
__license__ = "GNU GENERAL PUBLIC LICENSE v3"
__version__ = "0.1"
__maintainer__ = "Dennis Goldschmidt"
__email__ = "[email protected]"
__status__ = "In development"
def arg2files(_args):
files = []
for arg in _args:
if os.path.isfile(arg):
files.append(arg)
if os.path.isdir(arg):
for _file in os.listdir(arg):
if os.path.isfile(os.path.join(arg, _file)) \
and is_binary_cap(os.path.join(arg, _file)):
files.append(arg+_file)
return files
def len_iter(items):
return sum(1 for _ in items)
def consecutive_one(data):
return max(len_iter(run) for val, run in groupby(data) if val)
def get_data(_file, dur=360000, nch=64):
with open(_file, 'rb') as f: # with opening
cap_data = np.fromfile(f, dtype=np.ushort) # read binary data into numpy ndarray (1-dim.)
rows = cap_data.shape[0] # to shorten next line
        cap_data = (cap_data.reshape(nch, int(rows/nch), order='F').copy()) # reshape flat array into an (nch x samples) matrix (rows = channels, cols = time samples)
if np.isfinite(dur) and dur < cap_data.shape[1]:
cap_data = cap_data[:,:dur] # cut off data longer than duration
else:
if dur > cap_data.shape[1]: # warning
print("Warning: data shorter than given duration")
#cap_data[cap_data==-1]=0
return cap_data
def get_datetime(_file):
return dt.strptime(_file[-19:], '%Y-%m-%dT%H_%S_%M') # timestamp of file
def is_binary_cap(_file):
with open(_file, 'rb') as f:
if b'\x00' in f.read():
if has_timestamp(_file): # TODO: has timestamp function
return True
else:
return False
else:
return False
def has_timestamp(_file, printit=False):
try:
this_time = dt.strptime(_file[-8:], '%H_%M_%S')
except ValueError:
return False
else:
if printit:
print(this_time)
return True
def get_median_filtered(signal, threshold=3):
signal = signal.copy()
difference = np.abs(signal - np.median(signal))
median_difference = np.median(difference)
if median_difference == 0:
s = 0
else:
s = difference / float(median_difference)
mask = s > threshold
signal[mask] = np.median(signal)
return signal
def main(argv):
START = 0
STOP = 64
STEP = 2
# colors for plotting
colz = ["#C900E5", "#C603E1", "#C306DD", "#C009DA", "#BD0CD6", "#BA0FD2", "#B812CF", "#B515CB", "#B218C7", "#AF1BC4", "#AC1EC0", "#A921BD", "#A724B9", "#A427B5", "#A12AB2", "#9E2DAE", "#9B30AA", "#9833A7", "#9636A3", "#93399F", "#903C9C", "#8D3F98", "#8A4295", "#884591", "#85488D", "#824B8A", "#7F4E86", "#7C5182", "#79547F", "#77577B", "#745A77", "#715D74", "#6E6170", "#6B646D", "#686769", "#666A65", "#636D62", "#60705E", "#5D735A", "#5A7657", "#577953", "#557C4F", "#527F4C", "#4F8248", "#4C8545", "#498841", "#478B3D", "#448E3A", "#419136", "#3E9432", "#3B972F", "#389A2B", "#369D27", "#33A024", "#30A320", "#2DA61D", "#2AA919", "#27AC15", "#25AF12", "#22B20E", "#1FB50A", "#1CB807", "#19BB03", "#17BF00"]
# go through list of arguments and check for existing files and dirs
files = arg2files(argv)
# open filedialog for files
if len(argv)==0:
Tk().withdraw()
files = filedialog.askopenfilenames(title='Choose file/s to load')
fs = 100.
N = 360000
t = np.arange(N)/float(fs)
figs = []
for ind, _file in enumerate(files):
print(_file)
figs.append(vp.Fig(size=(1600, 1000), show=False))
fig = figs[-1]
plt_even = fig[0, 0]
plt_odd = fig[1, 0]
plt_even._configure_2d()
plt_odd._configure_2d()
plt_even.xlabel.text = 'Time (s)'
plt_odd.xlabel.text = 'Time (s)'
plt_even.title.text = os.path.basename(_file) + " even CH"
plt_odd.title.text = os.path.basename(_file) + " odd CH"
this_data = get_data(_file)
if N > get_data_len(_file):
N = get_data_len(_file)
t = np.arange(N)/float(fs)
print(this_data.shape)
diff_data = np.zeros(t.shape)
sum_signal = np.zeros(t.shape)
thr = 200
for ch in range(START, STOP, STEP):
if ch%16==0:
print(ch)
""" This one does the magic """
KSIZE = 21 ##501
filtered_signal = sg.medfilt(this_data[ch+1], kernel_size=KSIZE)
filtered_signal = np.abs(filtered_signal-filtered_signal[0]) # positive changes from baseline
thr_signal = filtered_signal > thr
sum_signal += thr_signal
#plt_even.plot(np.array((t, filtered_signal)).T, marker_size=0, color=colz[ch])
plt_even.plot(np.array((t, this_data[ch]+1000*ch)).T, marker_size=0, color=colz[ch])
#plt_even.plot(np.array((t[thr_signal==1], 1000*thr_signal[thr_signal==1])).T, marker_size=0, color='r')
#plt_odd.plot(np.array((t, this_data[ch+1]+1000*ch)).T, marker_size=0, color=colz[ch])
#plt_odd.spectrogram(this_data[ch], fs=fs)
#plt_odd.plot(np.array((t, filtered_signal)).T, marker_size=0, color=colz[ch])
plt_odd.plot(np.array((t, this_data[ch+1]+1000*ch)).T, marker_size=0, color=colz[ch])
ch_thr = 24
thr_sum_signal = sum_signal > ch_thr
if np.count_nonzero(thr_sum_signal) > 0:
print(np.count_nonzero(thr_sum_signal), consecutive_one(thr_sum_signal))
if(consecutive_one(thr_sum_signal) > 500):
print("Noise detected at", (np.nonzero(thr_sum_signal)[0])[0]/fs , "secs")
else:
print("No noise detected.")
else:
print("No noise detected.")
#plt_even.plot(np.array((t, (1000/32)*sum_signal)).T, marker_size=0, width=1, color='b')
plt_even.plot(np.array((t, (1000)*thr_sum_signal-1000)).T, marker_size=0, width=2, color='r')
plt_odd.plot(np.array((t, (1000)*thr_sum_signal-1000)).T, marker_size=0, width=2, color='r')
for fig in figs:
fig.show(run=True)
# if no files are given
if len(files) == 0:
print("WARNING: No valid files specified.")
"""
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import numpy as np
plt = pg.plot(np.random.normal(size=100), title="Simplest possible plotting example")
plt.getAxis('bottom').setTicks([[(x*20, str(x*20)) for x in range(6)]])
## Start Qt event loop unless running in interactive mode or using pyside.
#ex = pg.exporters.SVGExporter.SVGExporter(plt.plotItem.scene())
#ex.export('/home/luke/tmp/test.svg')
if __name__ == '__main__':
import sys
if sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):
pg.QtGui.QApplication.exec_()
"""
if __name__ == "__main__":
if "Darwin" in platform.platform():
print("Is a Mac.")
quit()
main(sys.argv[1:])
| gpl-3.0 |
Lawrence-Liu/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
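            # e.g. n_components=0.95 with ratio_cumsum=[0.72, 0.91, 0.97, ...]
            # gives np.sum([True, True, False, ...]) + 1 = 3 retained components,
            # the smallest count whose cumulative explained variance reaches 0.95.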
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
carrillo/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
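# Piecewise form of the modified Huber loss, with z = y_true * y_pred:
# -4z for z < -1, (1 - z)^2 for -1 <= z < 1, and 0 for z >= 1.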
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
davidlmorton/spikepy | spikepy/plotting_utils/make_into_signal_axes.py | 1 | 6329 | # Copyright (C) 2012 David Morton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from types import MethodType
import uuid
import numpy
from spikepy.plotting_utils.downsample_for_plot import downsample_for_plot
from spikepy.plotting_utils.general import is_iterable
def signal_set_xlim(axes, tmin=None, tmax=None, **kwargs):
'''
This set_xlim function replaces the usual matplotlib axes set_xlim
function. It will redraw the signals after having downsampled them.
'''
# don't do anything if locked.
if axes._are_axes_locked:
return
axes.lock_axes()
# parse inputs
if tmax is None and is_iterable(tmin):
tmin, tmax = tmin
if hasattr(axes, '_signal_times'):
for s_id in axes._signal_draw_order:
# don't replot if bounds didn't actually change.
xmin, xmax = axes.get_xlim()
if xmin == tmin and xmax == tmax and\
s_id in axes._signal_lines.keys():
continue
# delete existing lines
if s_id in axes._signal_lines.keys():
axes._signal_lines[s_id].remove()
del axes._signal_lines[s_id]
# downsample
new_signal, new_times = downsample_for_plot(
axes._signals[s_id],
axes._signal_times[s_id],
tmin, tmax, axes._signal_num_samples)
line = axes.plot(new_times, new_signal,
*axes._signal_args[s_id],
**axes._signal_kwargs[s_id])[0]
# save this line so we can remove it later.
axes._signal_lines[s_id] = line
axes.unlock_axes()
# actually change the xlimits
axes._pre_signal_set_xlim(tmin, tmax, **kwargs)
def signal_set_ylim(axes, *args, **kwargs):
# don't do anything if locked.
if axes._are_axes_locked:
return
axes._pre_signal_set_ylim(*args, **kwargs)
if hasattr(axes, '_live_updating_scalebars'):
axes.lock_axes()
axes._create_x_scale_bar(axes)
axes._create_y_scale_bar(axes)
axes.unlock_axes()
def get_signal_yrange(axes, padding=0.05):
all_min = 0
all_max = 0
for s_id in axes._signals.keys():
ymin = numpy.min(axes._signals[s_id])
ymax = numpy.max(axes._signals[s_id])
if ymin < all_min:
all_min = ymin
if ymax > all_max:
all_max = ymax
padding = abs(all_max - all_min)*(padding+1.0)
return(all_min - padding, all_max + padding)
def signal_plot(axes, times, signal, *args, **kwargs):
this_signal_id = uuid.uuid4()
if 'replace_signal_id' in kwargs:
replace_signal_id = kwargs['replace_signal_id']
del kwargs['replace_signal_id']
if hasattr(axes, '_signal_times'):
if replace_signal_id in axes._signal_lines.keys():
# delete the line first.
axes._signal_lines[replace_signal_id].remove()
del axes._signal_lines[replace_signal_id]
del axes._signals[replace_signal_id]
del axes._signal_times[replace_signal_id]
del axes._signal_kwargs[replace_signal_id]
del axes._signal_args[replace_signal_id]
axes._signal_draw_order.remove(replace_signal_id)
else:
raise RuntimeError('Cannot replace signal, signal_id %s does not exist' % replace_signal_id)
else:
raise RuntimeError('Cannot relace a signal because none exist.')
else:
if not hasattr(axes, '_signal_times'):
axes._signal_times = {}
axes._signals = {}
axes._signal_lines = {}
axes._signal_kwargs = {}
axes._signal_draw_order = []
axes._signal_args = {}
# add the new signal
axes._signals[this_signal_id] = signal
axes._signal_times[this_signal_id] = times
axes._signal_kwargs[this_signal_id] = kwargs
axes._signal_draw_order.append(this_signal_id)
axes._signal_args[this_signal_id] = args
axes.set_xlim(axes.get_xlim())
return this_signal_id
def unlock_axes(axes):
axes._are_axes_locked = False
def lock_axes(axes):
axes._are_axes_locked = True
def make_into_signal_axes(axes, num_samples=1500):
'''
Turn axes into an axis which can plot signals efficiently. It will
automatically downsample signals so they draw fast and don't fail to show
extrema properly.
Inputs:
axes: The axes you want to make into a signal_axes.
num_samples: The number (approx) of samples in downsampled signals.
Adds Attributes:
signal_plot: Behaves just like plot, except it expects signals.
get_signal_yrange: Returns a tuple that can be passed to
axes.set_ylim() to show all signal data on plot.
**monkeypatches set_xlim to handle downsampling of signals on the fly.
Returns:
None
'''
if not hasattr(axes, '_is_signal_axes'):
axes._is_signal_axes = True
axes.signal_plot = MethodType(signal_plot, axes, axes.__class__)
axes.get_signal_yrange = MethodType(get_signal_yrange, axes,
axes.__class__)
axes._pre_signal_set_xlim = axes.set_xlim
axes.set_xlim = MethodType(signal_set_xlim, axes, axes.__class__)
axes._pre_signal_set_ylim = axes.set_ylim
axes.set_ylim = MethodType(signal_set_ylim, axes, axes.__class__)
axes.lock_axes = MethodType(lock_axes, axes, axes.__class__)
axes.unlock_axes = MethodType(unlock_axes, axes, axes.__class__)
axes._are_axes_locked = False
axes._signal_num_samples = num_samples
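# A minimal usage sketch (with hypothetical `times`/`trace` arrays):
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   make_into_signal_axes(ax)
#   ax.signal_plot(times, trace, color='k')
#   ax.set_ylim(ax.get_signal_yrange())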
| gpl-3.0 |
lvulliard/KNN | knn.py | 1 | 3019 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##################################### Help ####################################
"""Simple KNN implementation
Usage:
knn.py [-cn]
knn.py --help
Options:
-c Classification mode
-n Normalize each column
-h --help Show this screen.
"""
################################### Imports ###################################
from docopt import docopt
import numpy as np
import random
import matplotlib.pyplot as plt
################################## Functions ##################################
################################### Classes ###################################
class KNN:
"""A K-nearest neighbors regression"""
def __init__(self, pts, c):
self.points = pts
self.classif = c
if c:
self.maxlabel = int(max(data[self.points,10]))
def predict(self, pt):
dist = np.array([np.linalg.norm(data[refPoint,1:10] - data[pt,1:10]) for refPoint in self.points])
dist_sort = dist.argsort()
nearest_points = [self.points[i] for i in dist_sort[:K]]
# Classification
if self.classif:
labCount = np.empty(self.maxlabel+1)
for i in xrange(self.maxlabel+1):
labCount[i] = list(data[nearest_points,10]).count(i)
return (-labCount).argsort()[0]
# Regression
else:
return sum(data[nearest_points,10])/K
##################################### Main ####################################
# Seed for reproductibility
random.seed(1)
# Import user input
arguments = docopt(__doc__)
DATA_FILE_NAME = "/home/koala/Documents/Scripts/KNN/KNN/glass.data"
NB_CROSSVAL = 10
NORM = arguments["-n"]
CLASSIF = arguments["-c"]
# Load data
data_file = open(DATA_FILE_NAME)
data = data_file.readlines()
data = np.array([line[:-1].split(",") for line in data], dtype=np.float64)
# Shuffle the data
neworder = range(len(data))
random.shuffle(neworder)
data = data[neworder,:]
# Normalize the data if needed
if NORM:
for i in xrange(1,10):
		data[:,i] = (data[:,i] - np.mean(data[:,i]))/np.sqrt(np.var(data[:,i]))
print(data)
meanSSE = []
meanAccuracy = []
# Number of neighbors considered
for K in xrange(1,26):
# Ten cross validation
training_length = len(data)/NB_CROSSVAL
KNNs = []
SSE = []
accuracy = []
for i in xrange(NB_CROSSVAL):
test_indices = range(training_length*i, training_length*(i+1))
training_indices = [k for k in range(len(data)) if k not in test_indices]
KNNs.append(KNN(training_indices, CLASSIF))
predictions = np.array([KNNs[i].predict(j) for j in test_indices])
expected = np.array([data[j,10] for j in test_indices])
SSE.append(sum((predictions - expected)**2))
accuracy.append(sum(predictions == expected)/float(len(test_indices)))
meanSSE.append(sum(SSE)/NB_CROSSVAL)
meanAccuracy.append(sum(accuracy)/NB_CROSSVAL)
print meanSSE
plt.plot(range(1,26), meanSSE)
plt.xlabel('K')
plt.ylabel('Mean SSE')
plt.show()
print meanAccuracy
plt.plot(range(1,26), meanAccuracy)
plt.xlabel('K')
plt.ylabel('Mean perfect accuracy')
plt.show()
| gpl-3.0 |
shoyer/xarray | xarray/core/variable.py | 1 | 89464 | import copy
import functools
import itertools
import numbers
import warnings
from collections import defaultdict
from datetime import timedelta
from distutils.version import LooseVersion
from typing import Any, Dict, Hashable, Mapping, Tuple, TypeVar, Union
import numpy as np
import pandas as pd
import xarray as xr # only for Dataset and DataArray
from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils
from .indexing import (
BasicIndexer,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
as_indexable,
)
from .npcompat import IS_NEP18_ACTIVE
from .options import _get_keep_attrs
from .pycompat import dask_array_type, integer_types
from .utils import (
OrderedSet,
_default,
decode_numpy_dict_values,
drop_dims_from_indexers,
either_dict_or_kwargs,
ensure_us_time_resolution,
infix_dims,
)
NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
indexing.ExplicitlyIndexed,
pd.Index,
) + dask_array_type
# https://github.com/python/mypy/issues/224
BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore
VariableType = TypeVar("VariableType", bound="Variable")
"""Type annotation to be used when methods of Variable return self or a copy of self.
When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the
output as an instance of the subclass.
Usage::
class Variable:
def f(self: VariableType, ...) -> VariableType:
...
"""
class MissingDimensionsError(ValueError):
"""Error class used when we can't safely guess a dimension name.
"""
# inherits from ValueError for backward compatibility
# TODO: move this to an xarray.exceptions module?
def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]":
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
"""
from .dataarray import DataArray
# TODO: consider extending this method to automatically handle Iris and
if isinstance(obj, DataArray):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except (TypeError, ValueError) as error:
# use .format() instead of % because it handles tuples consistently
raise error.__class__(
"Could not convert tuple of form "
"(dims, data[, attrs, encoding]): "
"{} to Variable.".format(obj)
)
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
obj = Variable(obj.name, obj)
elif isinstance(obj, (set, dict)):
raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj)))
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise MissingDimensionsError(
"cannot set variable %r with %r-dimensional data "
"without explicit dimension names. Pass a tuple of "
"(dims, data) instead." % (name, data.ndim)
)
obj = Variable(name, data, fastpath=True)
else:
raise TypeError(
"unable to convert object into a variable without an "
"explicit list of dimensions: %r" % obj
)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise MissingDimensionsError(
"%r has more than 1-dimension and the same name as one of its "
"dimensions %r. xarray disallows such variables because they "
"conflict with the coordinates used to label "
"dimensions." % (name, obj.dims)
)
obj = obj.to_index_variable()
return obj
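# For example, as_variable(("x", [1, 2, 3])) yields a Variable with dims ("x",),
# and as_variable(pd.Index([1, 2, 3], name="x"), name="x") is converted all the
# way to an IndexVariable because its name matches its single dimension.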
def _maybe_wrap_data(data):
"""
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
return PandasIndexAdapter(data)
return data
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64, according to the pandas convention.
"""
return np.asarray(pd.Series(values.ravel())).reshape(values.shape)
def as_compatible_data(data, fastpath=False):
"""Prepare and wrap data to put in a Variable.
- If data does not have the necessary attributes, convert it to ndarray.
- If data has dtype=datetime64, ensure that it has ns precision. If it's a
pandas.Timestamp, convert it to datetime64.
- If data is already a pandas or xarray object (other than an Index), just
use the values.
Finally, wrap it up with an adapter if necessary.
"""
if fastpath and getattr(data, "ndim", 0) > 0:
# can't use fastpath (yet) for scalars
return _maybe_wrap_data(data)
if isinstance(data, Variable):
return data.data
if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
return _maybe_wrap_data(data)
if isinstance(data, tuple):
data = utils.to_0d_object_array(data)
if isinstance(data, pd.Timestamp):
# TODO: convert, handle datetime objects, too
data = np.datetime64(data.value, "ns")
if isinstance(data, timedelta):
data = np.timedelta64(getattr(data, "value", data), "ns")
# we don't want nested self-described arrays
data = getattr(data, "values", data)
if isinstance(data, np.ma.MaskedArray):
mask = np.ma.getmaskarray(data)
if mask.any():
dtype, fill_value = dtypes.maybe_promote(data.dtype)
data = np.asarray(data, dtype=dtype)
data[mask] = fill_value
else:
data = np.asarray(data)
if not isinstance(data, np.ndarray):
if hasattr(data, "__array_function__"):
if IS_NEP18_ACTIVE:
return data
else:
raise TypeError(
"Got an NumPy-like array type providing the "
"__array_function__ protocol but NEP18 is not enabled. "
"Check that numpy >= v1.16 and that the environment "
'variable "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION" is set to '
'"1"'
)
# validate whether the data is valid data types
data = np.asarray(data)
if isinstance(data, np.ndarray):
if data.dtype.kind == "O":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "M":
data = np.asarray(data, "datetime64[ns]")
elif data.dtype.kind == "m":
data = np.asarray(data, "timedelta64[ns]")
return _maybe_wrap_data(data)
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed
"""
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == "M":
data = np.datetime64(data, "ns")
elif data.dtype.kind == "m":
data = np.timedelta64(data, "ns")
return data
class Variable(
common.AbstractArray, arithmetic.SupportsArithmetic, utils.NdimSizeLenMixin
):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
    name. For example, adding a Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
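Examples
--------
A small illustrative sketch of broadcasting by dimension name (values
chosen arbitrarily):
>>> t = Variable("time", [1, 2])
>>> s = Variable("space", [10, 20, 30])
>>> (t + s).dims
('time', 'space')
>>> (t + s).shape
(2, 3)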
"""
__slots__ = ("_dims", "_data", "_attrs", "_encoding")
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
"""
Parameters
----------
dims : str or sequence of str
Name(s) of the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions.
data : array_like
Data array which supports numpy-like data access.
attrs : dict_like or None, optional
Attributes to assign to the new variable. If None (default), an
empty attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
Well-behaved code to serialize a Variable should ignore
unrecognized encoding items.
"""
self._data = as_compatible_data(data, fastpath=fastpath)
self._dims = self._parse_dimensions(dims)
self._attrs = None
self._encoding = None
if attrs is not None:
self.attrs = attrs
if encoding is not None:
self.encoding = encoding
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@property
def nbytes(self):
return self.size * self.dtype.itemsize
@property
def _in_memory(self):
return isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or (
isinstance(self._data, indexing.MemoryCachedArray)
and isinstance(self._data.array, indexing.NumpyIndexingAdapter)
)
@property
def data(self):
if hasattr(self._data, "__array_function__") or isinstance(
self._data, dask_array_type
):
return self._data
else:
return self.values
@data.setter
def data(self, data):
data = as_compatible_data(data)
if data.shape != self.shape:
raise ValueError(
f"replacement data must match the Variable's shape. "
f"replacement data has shape {data.shape}; Variable has shape {self.shape}"
)
self._data = data
def load(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return this variable.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
if isinstance(self._data, dask_array_type):
self._data = as_compatible_data(self._data.compute(**kwargs))
elif not hasattr(self._data, "__array_function__"):
self._data = np.asarray(self._data)
return self
def compute(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return a new variable. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def __dask_tokenize__(self):
# Use v.data, instead of v._data, in order to cope with the wrappers
# around NetCDF and the like
from dask.base import normalize_token
return normalize_token((type(self), self._dims, self.data, self._attrs))
def __dask_graph__(self):
if isinstance(self._data, dask_array_type):
return self._data.__dask_graph__()
else:
return None
def __dask_keys__(self):
return self._data.__dask_keys__()
def __dask_layers__(self):
return self._data.__dask_layers__()
@property
def __dask_optimize__(self):
return self._data.__dask_optimize__
@property
def __dask_scheduler__(self):
return self._data.__dask_scheduler__
def __dask_postcompute__(self):
array_func, array_args = self._data.__dask_postcompute__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
def __dask_postpersist__(self):
array_func, array_args = self._data.__dask_postpersist__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
@staticmethod
def _dask_finalize(results, array_func, array_args, dims, attrs, encoding):
if isinstance(results, dict): # persist case
name = array_args[0]
results = {k: v for k, v in results.items() if k[0] == name}
data = array_func(results, *array_args)
return Variable(dims, data, attrs=attrs, encoding=encoding)
@property
def values(self):
"""The variable's data as a numpy.ndarray"""
return _as_array_or_item(self._data)
@values.setter
def values(self, values):
self.data = values
def to_base_variable(self):
"""Return this variable as a base xarray.Variable"""
return Variable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_variable = utils.alias(to_base_variable, "to_variable")
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return IndexVariable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
return self.to_index_variable().to_index()
def to_dict(self, data=True):
"""Dictionary representation of variable."""
item = {"dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs)}
if data:
item["data"] = ensure_us_time_resolution(self.values).tolist()
else:
item.update({"dtype": str(self.dtype), "shape": self.shape})
return item
@property
def dims(self):
"""Tuple of dimension names with which this variable is associated.
"""
return self._dims
@dims.setter
def dims(self, value):
self._dims = self._parse_dimensions(value)
def _parse_dimensions(self, dims):
if isinstance(dims, str):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError(
"dimensions %s must have the same length as the "
"number of data dimensions, ndim=%s" % (dims, self.ndim)
)
return dims
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
def _broadcast_indexes(self, key):
"""Prepare an indexing key for an indexing operation.
Parameters
----------
key: int, slice, array, dict or tuple of integer, slices and arrays
Any valid input for indexing.
Returns
-------
dims: tuple
Dimension of the resultant variable.
indexers: IndexingTuple subclass
Tuple of integer, array-like, or slices to use when indexing
self._data. The type of this argument indicates the type of
indexing to perform, either basic, outer or vectorized.
new_order : Optional[Sequence[int]]
Optional reordering to do on the result of indexing. If not None,
the first len(new_order) indexing should be moved to these
positions.
"""
key = self._item_key_to_tuple(key) # key is a tuple
# key is a tuple of full size
key = indexing.expanded_indexer(key, self.ndim)
# Convert a scalar Variable to an integer
key = tuple(
k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key
)
# Convert a 0d-array to an integer
key = tuple(
k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key
)
if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
return self._broadcast_indexes_basic(key)
self._validate_indexers(key)
# Detect whether the key can be mapped to an outer indexer:
# if every entry in the key is unlabeled (not a Variable), the key
# can be mapped as an OuterIndexer.
if all(not isinstance(k, Variable) for k in key):
return self._broadcast_indexes_outer(key)
# If all key entries are 1-dimensional and there are no duplicate
# dimension labels, the key can still be mapped as an OuterIndexer.
dims = []
for k, d in zip(key, self.dims):
if isinstance(k, Variable):
if len(k.dims) > 1:
return self._broadcast_indexes_vectorized(key)
dims.append(k.dims[0])
elif not isinstance(k, integer_types):
dims.append(d)
if len(set(dims)) == len(dims):
return self._broadcast_indexes_outer(key)
return self._broadcast_indexes_vectorized(key)
def _broadcast_indexes_basic(self, key):
dims = tuple(
dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)
)
return dims, BasicIndexer(key), None
def _validate_indexers(self, key):
""" Make sanity checks """
for dim, k in zip(self.dims, key):
if isinstance(k, BASIC_INDEXING_TYPES):
pass
else:
if not isinstance(k, Variable):
k = np.asarray(k)
if k.ndim > 1:
raise IndexError(
"Unlabeled multi-dimensional array cannot be "
"used for indexing: {}".format(k)
)
if k.dtype.kind == "b":
if self.shape[self.get_axis_num(dim)] != len(k):
raise IndexError(
"Boolean array size {:d} is used to index array "
"with shape {:s}.".format(len(k), str(self.shape))
)
if k.ndim > 1:
raise IndexError(
"{}-dimensional boolean indexing is "
"not supported. ".format(k.ndim)
)
if getattr(k, "dims", (dim,)) != (dim,):
raise IndexError(
"Boolean indexer should be unlabeled or on the "
"same dimension to the indexed array. Indexer is "
"on {:s} but the target dimension is {:s}.".format(
str(k.dims), dim
)
)
def _broadcast_indexes_outer(self, key):
dims = tuple(
k.dims[0] if isinstance(k, Variable) else dim
for k, dim in zip(key, self.dims)
if not isinstance(k, integer_types)
)
new_key = []
for k in key:
if isinstance(k, Variable):
k = k.data
if not isinstance(k, BASIC_INDEXING_TYPES):
k = np.asarray(k)
if k.size == 0:
# Slicing with an empty list; numpy cannot infer the dtype
k = k.astype(int)
elif k.dtype.kind == "b":
(k,) = np.nonzero(k)
new_key.append(k)
return dims, OuterIndexer(tuple(new_key)), None
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))
def _broadcast_indexes_vectorized(self, key):
variables = []
out_dims_set = OrderedSet()
for dim, value in zip(self.dims, key):
if isinstance(value, slice):
out_dims_set.add(dim)
else:
variable = (
value
if isinstance(value, Variable)
else as_variable(value, name=dim)
)
if variable.dtype.kind == "b": # boolean indexing case
(variable,) = variable._nonzero()
variables.append(variable)
out_dims_set.update(variable.dims)
variable_dims = set()
for variable in variables:
variable_dims.update(variable.dims)
slices = []
for i, (dim, value) in enumerate(zip(self.dims, key)):
if isinstance(value, slice):
if dim in variable_dims:
# We only convert slice objects to variables if they share
# a dimension with at least one other variable. Otherwise,
# we can equivalently leave them as slices and transpose
# the result. This is significantly faster/more efficient
# for most array backends.
values = np.arange(*value.indices(self.sizes[dim]))
variables.insert(i - len(slices), Variable((dim,), values))
else:
slices.append((i, value))
try:
variables = _broadcast_compat_variables(*variables)
except ValueError:
raise IndexError(f"Dimensions of indexers mismatch: {key}")
out_key = [variable.data for variable in variables]
out_dims = tuple(out_dims_set)
slice_positions = set()
for i, value in slices:
out_key.insert(i, value)
new_position = out_dims.index(self.dims[i])
slice_positions.add(new_position)
if slice_positions:
new_order = [i for i in range(len(out_dims)) if i not in slice_positions]
else:
new_order = None
return out_dims, VectorizedIndexer(tuple(out_key)), new_order
def __getitem__(self: VariableType, key) -> VariableType:
"""Return a new Variable object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement xarray-style indexing,
where if keys are unlabeled arrays, we index the array orthogonally
with them. If keys are labeled array (such as Variables), they are
broadcasted with our usual scheme and then the array is indexed with
the broadcasted key, like numpy's fancy indexing.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
"""
dims, indexer, new_order = self._broadcast_indexes(key)
data = as_indexable(self._data)[indexer]
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:
"""Used by IndexVariable to return IndexVariable objects when possible.
"""
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if isinstance(self._data, dask_array_type):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
mask = indexing.create_mask(indexer, self.shape, data)
# we need to invert the mask in order to pass data first. This helps
# pint to choose the correct unit
# TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
data = duck_array_ops.where(np.logical_not(mask), data, fill_value)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, "shape", ()))
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
dims, index_tuple, new_order = self._broadcast_indexes(key)
if not isinstance(value, Variable):
value = as_compatible_data(value)
if value.ndim > len(dims):
raise ValueError(
"shape mismatch: value array of shape %s could not be "
"broadcast to indexing result with %s dimensions"
% (value.shape, len(dims))
)
if value.ndim == 0:
value = Variable((), value)
else:
value = Variable(dims[-value.ndim :], value)
# broadcast to become assignable
value = value.set_dims(dims).data
if new_order:
value = duck_array_ops.asarray(value)
value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]
value = duck_array_ops.moveaxis(value, new_order, range(len(new_order)))
indexable = as_indexable(self._data)
indexable[index_tuple] = value
@property
def attrs(self) -> Dict[Hashable, Any]:
"""Dictionary of local attributes on this variable.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable.
"""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError("encoding must be castable to a dictionary")
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array is loaded into memory and copied onto
the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
Examples
--------
Shallow copy versus deep copy
>>> var = xr.Variable(data=[1, 2, 3], dims="x")
>>> var.copy()
<xarray.Variable (x: 3)>
array([1, 2, 3])
>>> var_0 = var.copy(deep=False)
>>> var_0[0] = 7
>>> var_0
<xarray.Variable (x: 3)>
array([7, 2, 3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> var.copy(data=[0.1, 0.2, 0.3])
<xarray.Variable (x: 3)>
array([ 0.1, 0.2, 0.3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
See Also
--------
pandas.DataFrame.copy
"""
if data is None:
data = self._data
if isinstance(data, indexing.MemoryCachedArray):
# don't share caching between copies
data = indexing.MemoryCachedArray(data.array)
if deep:
if hasattr(data, "__array_function__") or isinstance(
data, dask_array_type
):
data = data.copy()
elif not isinstance(data, PandasIndexAdapter):
# pandas.Index is immutable
data = np.array(data)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return self._replace(data=data)
def _replace(
self, dims=_default, data=_default, attrs=_default, encoding=_default
) -> "Variable":
if dims is _default:
dims = copy.copy(self._dims)
if data is _default:
data = copy.copy(self.data)
if attrs is _default:
attrs = copy.copy(self._attrs)
if encoding is _default:
encoding = copy.copy(self._encoding)
return type(self)(dims, data, attrs, encoding, fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return getattr(self._data, "chunks", None)
_array_counter = itertools.count()
def chunk(self, chunks=None, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If chunks are not provided for one or more dimensions, chunk
sizes along those dimensions will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already a dask array.
Returns
-------
chunked : xarray.Variable
"""
import dask
import dask.array as da
if utils.is_dict_like(chunks):
chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}
if chunks is None:
chunks = self.chunks or self.shape
data = self._data
if isinstance(data, da.Array):
data = data.rechunk(chunks)
else:
if isinstance(data, indexing.ExplicitlyIndexed):
# Unambiguously handle array storage backends (like NetCDF4 and h5py)
# that can't handle general array indexing. For example, in netCDF4 you
# can do "outer" indexing along two dimensions independent, which works
# differently from how NumPy handles it.
# da.from_array works by using lazy indexing with a tuple of slices.
# Using OuterIndexer is a pragmatic choice: dask does not yet handle
# different indexing types in an explicit way:
# https://github.com/dask/dask/issues/2883
data = indexing.ImplicitToExplicitIndexingAdapter(
data, indexing.OuterIndexer
)
if LooseVersion(dask.__version__) < "2.0.0":
kwargs = {}
else:
# All of our lazily loaded backend array classes should use NumPy
# array operations.
kwargs = {"meta": np.ndarray}
else:
kwargs = {}
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape))
data = da.from_array(data, chunks, name=name, lock=lock, **kwargs)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA):
"""
Use a sparse array as the backend.
"""
import sparse
# TODO what to do if dask-backended?
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = dtypes.result_type(self.dtype, fill_value)
if sparse_format is _default:
sparse_format = "coo"
try:
as_sparse = getattr(sparse, "as_{}".format(sparse_format.lower()))
except AttributeError:
raise ValueError("{} is not a valid sparse format".format(sparse_format))
data = as_sparse(self.data.astype(dtype), fill_value=fill_value)
return self._replace(data=data)
def _to_dense(self):
"""
Change the backend from a sparse array to a numpy array.
"""
if hasattr(self._data, "todense"):
return self._replace(data=self._data.todense())
return self.copy(deep=False)
def isel(
self: VariableType,
indexers: Mapping[Hashable, Any] = None,
missing_dims: str = "raise",
**indexers_kwargs: Any,
) -> VariableType:
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
missing_dims : {"raise", "warn", "ignore"}, default "raise"
What to do if dimensions that should be selected from are not present in the
DataArray:
- "exception": raise an exception
- "warning": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
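Examples
--------
A minimal illustrative sketch:
>>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
>>> v.isel(x=0).values.tolist()
[1, 2, 3]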
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
key = tuple(indexers.get(dim, slice(None)) for dim in self.dims)
return self[key]
def squeeze(self, dim=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : same type as caller
This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
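Examples
--------
A minimal illustrative sketch:
>>> Variable(("x", "y"), [[1, 2, 3]]).squeeze().dims
('y',)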
"""
dims = common.get_squeeze_dims(self, dim)
return self.isel({d: 0 for d in dims})
def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):
axis = self.get_axis_num(dim)
if count > 0:
keep = slice(None, -count)
elif count < 0:
keep = slice(-count, None)
else:
keep = slice(None)
trimmed_data = self[(slice(None),) * axis + (keep,)].data
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
width = min(abs(count), self.shape[axis])
dim_pad = (width, 0) if count >= 0 else (0, width)
pads = [(0, 0) if d != dim else dim_pad for d in self.dims]
data = duck_array_ops.pad(
trimmed_data.astype(dtype),
pads,
mode="constant",
constant_values=fill_value,
)
if isinstance(data, dask_array_type):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
"""
Return a new Variable with shifted data.
Parameters
----------
shifts : mapping of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value: scalar, optional
Value to use for newly missing values
**shifts_kwargs:
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but shifted data.
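Examples
--------
A minimal illustrative sketch; integer data is promoted to float so that
the default NaN fill value can be represented:
>>> Variable("x", [1, 2, 3]).shift(x=1).values.tolist()
[nan, 1.0, 2.0]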
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift")
result = self
for dim, count in shifts.items():
result = result._shift_one_dim(dim, count, fill_value=fill_value)
return result
def _pad_options_dim_to_index(
self,
pad_option: Mapping[Hashable, Union[int, Tuple[int, int]]],
fill_with_shape=False,
):
if fill_with_shape:
return [
(n, n) if d not in pad_option else pad_option[d]
for d, n in zip(self.dims, self.data.shape)
]
return [(0, 0) if d not in pad_option else pad_option[d] for d in self.dims]
def pad(
self,
pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,
mode: str = "constant",
stat_length: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
constant_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
end_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
reflect_type: str = None,
**pad_width_kwargs: Any,
):
"""
Return a new Variable with padded data.
Parameters
----------
pad_width: Mapping with the form of {dim: (pad_before, pad_after)}
Number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode: str, default "constant"
One of the padding modes supported by numpy.pad / dask.array.pad;
see their documentation for the available options.
stat_length : int, tuple or mapping of the form {dim: tuple}
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
constant_values : scalar, tuple or mapping of the form {dim: tuple}
Used in 'constant'. The values to set the padded values for each
axis.
end_values : scalar, tuple or mapping of the form {dim: tuple}
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
reflect_type : {'even', 'odd'}, optional
Used in 'reflect', and 'symmetric'. The 'even' style is the
default with an unaltered reflection around the edge value. For
the 'odd' style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
**pad_width_kwargs:
One of pad_width or pad_width_kwargs must be provided.
Returns
-------
padded : Variable
Variable with the same dimensions and attributes but padded data.
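Examples
--------
A minimal sketch using the default "constant" mode; integer data is
promoted to float so that NaN can be used as the fill value:
>>> Variable("x", [1, 2, 3]).pad(x=(1, 1)).values.tolist()
[nan, 1.0, 2.0, 3.0, nan]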
"""
pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad")
# change default behaviour of pad with mode constant
if mode == "constant" and (
constant_values is None or constant_values is dtypes.NA
):
dtype, constant_values = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
# create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty
if isinstance(stat_length, dict):
stat_length = self._pad_options_dim_to_index(
stat_length, fill_with_shape=True
)
if isinstance(constant_values, dict):
constant_values = self._pad_options_dim_to_index(constant_values)
if isinstance(end_values, dict):
end_values = self._pad_options_dim_to_index(end_values)
# workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303
if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]:
stat_length = [(n, n) for n in self.data.shape] # type: ignore
# change integer values to a tuple of two of those values and change pad_width to index
for k, v in pad_width.items():
if isinstance(v, numbers.Number):
pad_width[k] = (v, v)
pad_width_by_index = self._pad_options_dim_to_index(pad_width)
# create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty
pad_option_kwargs = {}
if stat_length is not None:
pad_option_kwargs["stat_length"] = stat_length
if constant_values is not None:
pad_option_kwargs["constant_values"] = constant_values
if end_values is not None:
pad_option_kwargs["end_values"] = end_values
if reflect_type is not None:
pad_option_kwargs["reflect_type"] = reflect_type # type: ignore
array = duck_array_ops.pad(
self.data.astype(dtype, copy=False),
pad_width_by_index,
mode=mode,
**pad_option_kwargs,
)
return type(self)(self.dims, array)
def _roll_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
count %= self.shape[axis]
if count != 0:
indices = [slice(-count, None), slice(None, -count)]
else:
indices = [slice(None)]
arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices]
data = duck_array_ops.concatenate(arrays, axis)
if isinstance(data, dask_array_type):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def roll(self, shifts=None, **shifts_kwargs):
"""
Return a new Variable with rolled data.
Parameters
----------
shifts : mapping of the form {dim: offset}
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
**shifts_kwargs:
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : Variable
Variable with the same dimensions and attributes but rolled data.
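Examples
--------
A minimal illustrative sketch:
>>> Variable("x", [1, 2, 3]).roll(x=1).values.tolist()
[3, 1, 2]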
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
def transpose(self, *dims) -> "Variable":
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
This operation returns a view of this variable's data. It is
lazy for dask-backed Variables but not for numpy-backed Variables.
See Also
--------
numpy.transpose
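Examples
--------
A minimal illustrative sketch:
>>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
>>> v.transpose().dims
('y', 'x')
>>> v.T.shape
(3, 2)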
"""
if len(dims) == 0:
dims = self.dims[::-1]
dims = tuple(infix_dims(dims, self.dims))
axes = self.get_axis_num(dims)
if len(dims) < 2 or dims == self.dims:
# no need to transpose if only one dimension
# or dims are in same order
return self.copy(deep=False)
data = as_indexable(self._data).transpose(axes)
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
@property
def T(self) -> "Variable":
return self.transpose()
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to a variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
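Examples
--------
A minimal illustrative sketch; the new dimension is inserted with length 1:
>>> Variable("x", [1, 2, 3]).set_dims(("y", "x")).shape
(1, 3)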
"""
if isinstance(dims, str):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError(
"new dimensions %r must be a superset of "
"existing dimensions %r" % (dims, self.dims)
)
self_dims = set(self.dims)
expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(
expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True
)
return expanded_var.transpose(*dims)
def _stack_once(self, dims, new_dim):
if not set(dims) <= set(self.dims):
raise ValueError("invalid existing dimensions: %s" % dims)
if new_dim in self.dims:
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if len(dims) == 0:
# don't stack
return self.copy(deep=False)
other_dims = [d for d in self.dims if d not in dims]
dim_order = other_dims + list(dims)
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + (-1,)
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + (new_dim,)
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def stack(self, dimensions=None, **dimensions_kwargs):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : Mapping of form new_name=(dim1, dim2, ...)
Names of new dimensions, and the existing dimensions that they
replace.
**dimensions_kwargs:
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See also
--------
Variable.unstack
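Examples
--------
A minimal illustrative sketch:
>>> v = Variable(("x", "y"), [[0, 1, 2], [3, 4, 5]])
>>> v.stack(z=("x", "y")).values.tolist()
[0, 1, 2, 3, 4, 5]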
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack")
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
def _unstack_once(self, dims, old_dim):
new_dim_names = tuple(dims.keys())
new_dim_sizes = tuple(dims.values())
if old_dim not in self.dims:
raise ValueError("invalid existing dimension: %s" % old_dim)
if set(new_dim_names).intersection(self.dims):
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if np.prod(new_dim_sizes) != self.sizes[old_dim]:
raise ValueError(
"the product of the new dimension sizes must "
"equal the size of the old dimension"
)
other_dims = [d for d in self.dims if d != old_dim]
dim_order = other_dims + [old_dim]
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + new_dim_names
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def unstack(self, dimensions=None, **dimensions_kwargs):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of the form old_dim={dim1: size1, ...}
Names of existing dimensions, and the new dimensions and sizes
that they map to.
**dimensions_kwargs:
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See also
--------
Variable.stack
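Examples
--------
A minimal illustrative sketch (the inverse of the ``stack`` example):
>>> Variable("z", [0, 1, 2, 3, 4, 5]).unstack(z={"x": 2, "y": 3}).shape
(2, 3)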
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack")
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once(dims, old_dim)
return result
def fillna(self, value):
return ops.fillna(self, value)
def where(self, cond, other=dtypes.NA):
return ops.where_method(self, cond, other)
def reduce(
self,
func,
dim=None,
axis=None,
keep_attrs=None,
keepdims=False,
allow_lazy=None,
**kwargs,
):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
keepdims : bool, default False
If True, the dimensions which are reduced are left in the result
as dimensions of size one
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
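Examples
--------
A minimal illustrative sketch using ``np.sum`` as the reduction function:
>>> v = Variable(("x", "y"), [[0, 1, 2], [3, 4, 5]])
>>> v.reduce(np.sum, dim="x").values.tolist()
[3, 5, 7]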
"""
if dim == ...:
dim = None
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if dim is not None:
axis = self.get_axis_num(dim)
if allow_lazy is not None:
warnings.warn(
"allow_lazy is deprecated and will be removed in version 0.16.0. It is now True by default.",
DeprecationWarning,
)
else:
allow_lazy = True
input_data = self.data if allow_lazy else self.values
if axis is not None:
data = func(input_data, axis=axis, **kwargs)
else:
data = func(input_data, **kwargs)
if getattr(data, "shape", ()) == self.shape:
dims = self.dims
else:
removed_axes = (
range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim
)
if keepdims:
# Insert np.newaxis for removed dims
slices = tuple(
np.newaxis if i in removed_axes else slice(None, None)
for i in range(self.ndim)
)
if getattr(data, "shape", None) is None:
# Reduce has produced a scalar value, not an array-like
data = np.asanyarray(data)[slices]
else:
data = data[slices]
dims = self.dims
else:
dims = [
adim for n, adim in enumerate(self.dims) if n not in removed_axes
]
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
return Variable(dims, data, attrs=attrs)
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Array
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to
which to assign each dataset along the concatenated dimension.
If not supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
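Examples
--------
A minimal sketch concatenating along an existing dimension:
>>> parts = [Variable("x", [1, 2]), Variable("x", [3, 4])]
>>> Variable.concat(parts, dim="x").values.tolist()
[1, 2, 3, 4]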
"""
if not isinstance(dim, str):
(dim,) = dim.dims
# can't do this lazily: we need to loop through variables at least
# twice
variables = list(variables)
first_var = variables[0]
arrays = [v.data for v in variables]
if dim in first_var.dims:
axis = first_var.get_axis_num(dim)
dims = first_var.dims
data = duck_array_ops.concatenate(arrays, axis=axis)
if positions is not None:
# TODO: deprecate this option -- we don't need it for groupby
# any more.
indices = nputils.inverse_permutation(np.concatenate(positions))
data = duck_array_ops.take(data, indices, axis=axis)
else:
axis = 0
dims = (dim,) + first_var.dims
data = duck_array_ops.stack(arrays, axis=axis)
attrs = dict(first_var.attrs)
encoding = dict(first_var.encoding)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError(
f"Variable has dimensions {list(var.dims)} but first Variable has dimensions {list(first_var.dims)}"
)
return cls(dims, data, attrs, encoding)
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
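Examples
--------
A minimal illustrative sketch; only dimensions and values matter, not dtype:
>>> Variable("x", [1, 2]).equals(Variable("x", [1.0, 2.0]))
True
>>> Variable("x", [1, 2]).equals(Variable("y", [1, 2]))
False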
"""
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and (
self._data is other._data or equiv(self.data, other.data)
)
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other, equiv=equiv)
def identical(self, other, equiv=duck_array_ops.array_equiv):
"""Like equals, but also checks attributes.
"""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self.equals(
other, equiv=equiv
)
except (TypeError, AttributeError):
return False
def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv):
"""True if the intersection of two Variable's non-null data is
equal; otherwise false.
Variables can thus still be equal if there are locations where either,
or both, contain NaN values.
"""
return self.broadcast_equals(other, equiv=equiv)
def quantile(
self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True
):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantile(s) of the array elements.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanquantile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
from .computation import apply_ufunc
_quantile_func = np.nanquantile if skipna else np.quantile
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
scalar = utils.is_scalar(q)
q = np.atleast_1d(np.asarray(q, dtype=np.float64))
if dim is None:
dim = self.dims
if utils.is_scalar(dim):
dim = [dim]
def _wrapper(npa, **kwargs):
# move quantile axis to end. required for apply_ufunc
return np.moveaxis(_quantile_func(npa, **kwargs), 0, -1)
axis = np.arange(-1, -1 * len(dim) - 1, -1)
result = apply_ufunc(
_wrapper,
self,
input_core_dims=[dim],
exclude_dims=set(dim),
output_core_dims=[["quantile"]],
output_dtypes=[np.float64],
output_sizes={"quantile": len(q)},
dask="parallelized",
kwargs={"q": q, "axis": axis, "interpolation": interpolation},
)
# for backward compatibility
result = result.transpose("quantile", ...)
if scalar:
result = result.squeeze("quantile")
if keep_attrs:
result.attrs = self._attrs
return result
def rank(self, dim, pct=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
ranked : Variable
See Also
--------
Dataset.rank, DataArray.rank
"""
import bottleneck as bn
data = self.data
if isinstance(data, dask_array_type):
raise TypeError(
"rank does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method."
)
elif not isinstance(data, np.ndarray):
raise TypeError(
"rank is not implemented for {} objects.".format(type(data))
)
axis = self.get_axis_num(dim)
func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata
ranked = func(data, axis=axis)
if pct:
count = np.sum(~np.isnan(data), axis=axis, keepdims=True)
ranked /= count
return Variable(self.dims, ranked)
def rolling_window(
self, dim, window, window_dim, center=False, fill_value=dtypes.NA
):
"""
Make a rolling window along dim and add a new dimension (window_dim) at the last position.
Parameters
----------
dim: str
Dimension over which to compute rolling_window.
window: int
Window size of the rolling window.
window_dim: str
New name of the window dimension.
center: boolean, default False
If True, pad fill_value at both ends. Otherwise, pad at the head
of the axis.
fill_value:
Value to use for padding.
Returns
-------
Variable that is a view of the original array with an added dimension of
size ``window``.
The return dims: self.dims + (window_dim, )
The return shape: self.shape + (window, )
Examples
--------
>>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4)))
>>> v.rolling_window("b", 3, "window_dim")
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, nan, 0], [nan, 0, 1], [0, 1, 2], [1, 2, 3]],
[[nan, nan, 4], [nan, 4, 5], [4, 5, 6], [5, 6, 7]]])
>>> v.rolling_window("b", 3, "window_dim", center=True)
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, nan]],
[[nan, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, nan]]])
"""
if fill_value is dtypes.NA: # np.nan is passed
dtype, fill_value = dtypes.maybe_promote(self.dtype)
array = self.astype(dtype, copy=False).data
else:
dtype = self.dtype
array = self.data
new_dims = self.dims + (window_dim,)
return Variable(
new_dims,
duck_array_ops.rolling_window(
array,
axis=self.get_axis_num(dim),
window=window,
center=center,
fill_value=fill_value,
),
)
def coarsen(self, windows, func, boundary="exact", side="left", **kwargs):
"""
Apply a reduction function over coarsened windows of this variable.
"""
windows = {k: v for k, v in windows.items() if k in self.dims}
if not windows:
return self.copy()
reshaped, axes = self._coarsen_reshape(windows, boundary, side)
if isinstance(func, str):
name = func
func = getattr(duck_array_ops, name, None)
if func is None:
raise NameError(f"{name} is not a valid method.")
return self._replace(data=func(reshaped, axis=axes, **kwargs))
def _coarsen_reshape(self, windows, boundary, side):
"""
Construct a reshaped-array for coarsen
"""
if not utils.is_dict_like(boundary):
boundary = {d: boundary for d in windows.keys()}
if not utils.is_dict_like(side):
side = {d: side for d in windows.keys()}
# remove unrelated dimensions
boundary = {k: v for k, v in boundary.items() if k in windows}
side = {k: v for k, v in side.items() if k in windows}
for d, window in windows.items():
if window <= 0:
raise ValueError(f"window must be > 0. Given {window}")
variable = self
for d, window in windows.items():
# trim or pad the object
size = variable.shape[self._get_axis_num(d)]
n = int(size / window)
if boundary[d] == "exact":
if n * window != size:
raise ValueError(
"Could not coarsen a dimension of size {} with "
"window {}".format(size, window)
)
elif boundary[d] == "trim":
if side[d] == "left":
variable = variable.isel({d: slice(0, window * n)})
else:
excess = size - window * n
variable = variable.isel({d: slice(excess, None)})
elif boundary[d] == "pad": # pad
pad = window * n - size
if pad < 0:
pad += window
if side[d] == "left":
pad_width = {d: (0, pad)}
else:
pad_width = {d: (pad, 0)}
variable = variable.pad(pad_width, mode="constant")
else:
raise TypeError(
"{} is invalid for boundary. Valid option is 'exact', "
"'trim' and 'pad'".format(boundary[d])
)
shape = []
axes = []
axis_count = 0
for i, d in enumerate(variable.dims):
if d in windows:
size = variable.shape[i]
shape.append(int(size / windows[d]))
shape.append(windows[d])
axis_count += 1
axes.append(i + axis_count)
else:
shape.append(variable.shape[i])
keep_attrs = _get_keep_attrs(default=False)
variable.attrs = variable._attrs if keep_attrs else {}
return variable.data.reshape(shape), tuple(axes)
@property
def real(self):
return type(self)(self.dims, self.data.real, self._attrs)
@property
def imag(self):
return type(self)(self.dims, self.data.imag, self._attrs)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
with np.errstate(all="ignore"):
return self.__array_wrap__(f(self.data, *args, **kwargs))
return func
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (xr.DataArray, xr.Dataset)):
return NotImplemented
self_data, other_data, dims = _broadcast_compat_data(self, other)
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
with np.errstate(all="ignore"):
new_data = (
f(self_data, other_data)
if not reflexive
else f(other_data, self_data)
)
result = Variable(dims, new_data, attrs=attrs)
return result
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, xr.Dataset):
raise TypeError("cannot add a Dataset to a Variable in-place")
self_data, other_data, dims = _broadcast_compat_data(self, other)
if dims != self.dims:
raise ValueError("dimensions cannot change for in-place " "operations")
with np.errstate(all="ignore"):
self.values = f(self_data, other_data)
return self
return func
def _to_numeric(self, offset=None, datetime_unit=None, dtype=float):
""" A (private) method to convert datetime array to numeric dtype
See duck_array_ops.datetime_to_numeric
"""
numeric_array = duck_array_ops.datetime_to_numeric(
self.data, offset, datetime_unit, dtype
)
return type(self)(self.dims, numeric_array, self._attrs)
ops.inject_all_ops_and_reduce_methods(Variable)
class IndexVariable(Variable):
"""Wrapper for accommodating a pandas.Index in an xarray.Variable.
IndexVariable preserves loaded values in the form of a pandas.Index instead
of a NumPy array. Hence, their values are immutable and must always be one-
dimensional.
They also have a name property, which is the name of their sole dimension
unless another name is given.
"""
__slots__ = ()
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
super().__init__(dims, data, attrs, encoding, fastpath)
if self.ndim != 1:
raise ValueError("%s objects must be 1-dimensional" % type(self).__name__)
# Unlike in Variable, always eagerly load values into memory
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
def __dask_tokenize__(self):
from dask.base import normalize_token
# Don't waste time converting pd.Index to np.ndarray
return normalize_token((type(self), self._dims, self._data.array, self._attrs))
def load(self):
# data is already loaded into memory for IndexVariable
return self
# https://github.com/python/mypy/issues/1465
@Variable.data.setter # type: ignore
def data(self, data):
raise ValueError(
f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
@Variable.values.setter # type: ignore
def values(self, values):
raise ValueError(
f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
def chunk(self, chunks=None, name=None, lock=False):
# Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()
return self.copy(deep=False)
def _as_sparse(self, sparse_format=_default, fill_value=_default):
# Dummy
return self.copy(deep=False)
def _to_dense(self):
# Dummy
return self.copy(deep=False)
def _finalize_indexing_result(self, dims, data):
if getattr(data, "ndim", 0) != 1:
# returns Variable rather than IndexVariable if multi-dimensional
return Variable(dims, data, self._attrs, self._encoding)
else:
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def __setitem__(self, key, value):
raise TypeError("%s values cannot be modified" % type(self).__name__)
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Specialized version of Variable.concat for IndexVariable objects.
This exists because we want to avoid converting Index objects to NumPy
arrays, if possible.
"""
if not isinstance(dim, str):
(dim,) = dim.dims
variables = list(variables)
first_var = variables[0]
if any(not isinstance(v, cls) for v in variables):
raise TypeError(
"IndexVariable.concat requires that all input "
"variables be IndexVariable objects"
)
indexes = [v._data.array for v in variables]
if not indexes:
data = []
else:
data = indexes[0].append(indexes[1:])
if positions is not None:
indices = nputils.inverse_permutation(np.concatenate(positions))
data = data.take(indices)
attrs = dict(first_var.attrs)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError("inconsistent dimensions")
utils.remove_incompatible_items(attrs, var.attrs)
return cls(first_var.dims, data, attrs)
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Deep is ignored when data is given. Whether the data array is
loaded into memory and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
"""
if data is None:
data = self._data.copy(deep=deep)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def equals(self, other, equiv=None):
# if equiv is specified, delegate to the parent class implementation
if equiv is not None:
return super().equals(other, equiv)
# otherwise use the native index equals, rather than looking at _data
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and self._data_equals(other)
except (TypeError, AttributeError):
return False
def _data_equals(self, other):
return self.to_index().equals(other.to_index())
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return self
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
# n.b. creating a new pandas.Index from an old pandas.Index is
# basically free as pandas.Index objects are immutable
assert self.ndim == 1
index = self._data.array
if isinstance(index, pd.MultiIndex):
# set default names for multi-index unnamed levels so that
# we can safely rename dimension / coordinate later
valid_level_names = [
name or "{}_level_{}".format(self.dims[0], i)
for i, name in enumerate(index.names)
]
index = index.set_names(valid_level_names)
else:
index = index.set_names(self.name)
return index
@property
def level_names(self):
"""Return MultiIndex level names or None if this IndexVariable has no
MultiIndex.
"""
index = self.to_index()
if isinstance(index, pd.MultiIndex):
return index.names
else:
return None
def get_level_variable(self, level):
"""Return a new IndexVariable from a given MultiIndex level."""
if self.level_names is None:
raise ValueError("IndexVariable %r has no MultiIndex" % self.name)
index = self.to_index()
return type(self)(self.dims, index.get_level_values(level))
@property
def name(self):
return self.dims[0]
@name.setter
def name(self, value):
raise AttributeError("cannot modify name of IndexVariable in-place")
# for backwards compatibility
Coordinate = utils.alias(IndexVariable, "Coordinate")
def _unified_dims(variables):
# validate dimensions
all_dims = {}
for var in variables:
var_dims = var.dims
if len(set(var_dims)) < len(var_dims):
raise ValueError(
"broadcasting cannot handle duplicate "
"dimensions: %r" % list(var_dims)
)
for d, s in zip(var_dims, var.shape):
if d not in all_dims:
all_dims[d] = s
elif all_dims[d] != s:
raise ValueError(
"operands cannot be broadcast together "
"with mismatched lengths for dimension %r: %s"
% (d, (all_dims[d], s))
)
return all_dims
def _broadcast_compat_variables(*variables):
"""Create broadcast compatible variables, with the same dimensions.
Unlike the result of broadcast_variables(), some variables may have
    dimensions of size 1 instead of the size of the broadcast dimension.
"""
dims = tuple(_unified_dims(variables))
return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables)
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(
var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables
)
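# Illustrative sketch (assumed example values, not part of the module): for
# a = Variable(('x',), [1, 2]) and b = Variable(('y',), [3, 4, 5]),
# broadcast_variables(a, b) returns two variables that both carry dims
# ('x', 'y'); their data are views of the original arrays.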
def _broadcast_compat_data(self, other):
if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]):
# `other` satisfies the necessary Variable API for broadcast_variables
new_self, new_other = _broadcast_compat_variables(self, other)
self_data = new_self.data
other_data = new_other.data
dims = new_self.dims
else:
# rely on numpy broadcasting rules
self_data = self.data
other_data = other
dims = self.dims
return self_data, other_data, dims
def concat(variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Array
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
variables = list(variables)
if all(isinstance(v, IndexVariable) for v in variables):
return IndexVariable.concat(variables, dim, positions, shortcut)
else:
return Variable.concat(variables, dim, positions, shortcut)
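# Minimal usage sketch (hypothetical values): concat([Variable('x', [1, 2]),
# Variable('x', [3, 4])], dim='x') dispatches to Variable.concat, while a list
# made up entirely of IndexVariable objects is routed to IndexVariable.concat
# above so the pandas indexes are appended without conversion.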
def assert_unique_multiindex_level_names(variables):
"""Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
"""
level_names = defaultdict(list)
all_level_names = set()
for var_name, var in variables.items():
if isinstance(var._data, PandasIndexAdapter):
idx_level_names = var.to_index_variable().level_names
if idx_level_names is not None:
for n in idx_level_names:
level_names[n].append(f"{n!r} ({var_name})")
if idx_level_names:
all_level_names.update(idx_level_names)
for k, v in level_names.items():
if k in variables:
v.append("(%s)" % k)
duplicate_names = [v for v in level_names.values() if len(v) > 1]
if duplicate_names:
conflict_str = "\n".join(", ".join(v) for v in duplicate_names)
raise ValueError("conflicting MultiIndex level name(s):\n%s" % conflict_str)
    # Check for conflicts between level names and dimensions (GH-2299)
for k, v in variables.items():
for d in v.dims:
if d in all_level_names:
raise ValueError(
"conflicting level / dimension names. {} "
"already exists as a level name.".format(d)
)
| apache-2.0 |
kgullikson88/Chiron-Scripts | AnalyzeSensitivityAnalysis.py | 1 | 12302 | import os
import sys
from collections import defaultdict
from operator import itemgetter
import FittingUtilities
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits as pyfits
from astropy import units
import SpectralTypeRelations
import StarData
import PlotBlackbodies
"""
Program to analyze the output of SensitivityAnalysis, and make some pretty plots!
Command line arguments:
-combine: will combine several output (say as generated by xgrid) NOT YET IMPLEMENTED
-xaxis: specifies the variable to use as the x axis. Choices are as follows
SecondarySpectralType
SecondaryMass
MassRatio
DetectionRate
AverageSignificance
MagnitudeDifference
-yaxis: specifies the variable to use for the y axis. Choices are the same as for -xaxis
-infile: specifies the input filename (default is Sensitivity/summary.dat).
If combine is True, the input filename should be a list of comma-separated
filenames
"""
def MakeSummaryFile(directory, prefix, outfilename="Sensitivity/logfile.dat", tolerance=10.0):
# Read in all the correlation files
allfiles = [f for f in os.listdir(directory) if f.startswith(prefix)]
#Figure out the primary mass
MS = SpectralTypeRelations.MainSequence()
header = pyfits.getheader(prefix + ".fits")
starname = header["OBJECT"].split()[0].replace("_", " ")
stardata = StarData.GetData(starname)
primary_mass = MS.Interpolate(MS.Mass, stardata.spectype[:2])
primary_temp = MS.Interpolate(MS.Temperature, stardata.spectype[:2])
detections = defaultdict(list)
outfile = open(outfilename, "w")
outfile.write("Sensitivity Analysis:\n*****************************\n\n")
outfile.write(
"Filename\t\t\tPrimary Temperature\tSecondary Temperature\tMass (Msun)\tMass Ratio\tVelocity\tPeak Correct?\tSignificance\n")
for fname in allfiles:
#Read in temperature and expected velocity from filename
T = float(fname.split(prefix)[-1].split("t")[-1].split("_")[0])
v = float(fname.split("v")[-1])
#Figure out the secondary mass from the temperature
spt = MS.GetSpectralType(MS.Temperature, T)
secondary_mass = MS.Interpolate(MS.Mass, spt)
q = secondary_mass / primary_mass
#Find the maximum in the cross-correlation function
vel, corr = np.loadtxt(directory + fname, unpack=True)
idx = np.argmax(corr)
vmax = vel[idx]
fit = FittingUtilities.Continuum(vel, corr, fitorder=2, lowreject=3, highreject=3)
corr -= fit
mean = corr.mean()
std = corr.std()
significance = (corr[idx] - mean) / std
if np.abs(vmax - v) <= tolerance:
#Signal found!
outfile.write("%s\t%i\t\t\t%i\t\t\t\t%.2f\t\t%.4f\t\t%i\t\tyes\t\t%.2f\n" % (
prefix, primary_temp, T, secondary_mass, q, v, significance))
else:
outfile.write("%s\t%i\t\t\t%i\t\t\t\t%.2f\t\t%.4f\t\t%i\t\tno\t\t%.2f\n" % (
prefix, primary_temp, T, secondary_mass, q, v, significance))
outfile.close()
def MakePlot(infilename):
# Set up thing to cycle through matplotlib linestyles
from itertools import cycle
lines = ["-", "--", "-.", ":"]
linecycler = cycle(lines)
#Defaults
combine = False
xaxis = "SecondarySpectralType"
yaxis = "DetectionRate"
#Command-line overrides
for arg in sys.argv:
if "combine" in arg:
combine = True
elif "xaxis" in arg:
xaxis = arg.split("=")[-1]
elif "yaxis" in arg:
yaxis = arg.split("=")[-1]
elif "infile" in arg:
infilename = arg.split("=")[-1]
if combine and "," in infilename:
infiles = infilename.split(",")
else:
infiles = [infilename, ]
#Set up dictionaries/lists
p_spt = defaultdict(list) #Primary spectral type
s_spt = defaultdict(list) #Secondary spectral type
s_temp = defaultdict(list) #Secondary Temperature
p_mass = defaultdict(list) #Primary mass
s_mass = defaultdict(list) #Secondary mass
q = defaultdict(list) #Mass ratio
det_rate = defaultdict(list) #Detection rate
sig = defaultdict(list) #Average detection significance
magdiff = defaultdict(list) #Magnitude difference
namedict = {"SecondarySpectralType": s_spt,
"SecondaryTemperature": s_temp,
"SecondaryMass": s_mass,
"MassRatio": q,
"DetectionRate": det_rate,
"AverageSignificance": sig,
"MagnitudeDifference": magdiff}
labeldict = {"SecondarySpectralType": "Secondary Spectral Type",
"SecondaryTemperature": "Secondary Temperature (K)",
"SecondaryMass": "SecondaryMass (Solar Masses)",
"MassRatio": "Mass Ratio",
"DetectionRate": "Detection Rate",
"AverageSignificance": "Average Significance",
"MagnitudeDifference": "Magnitude Difference"}
if xaxis not in namedict.keys() or yaxis not in namedict:
print "Error! axis keywords must be one of the following:"
for key in namedict.keys():
print key
print "You chose %s for the x axis and %s for the y axis" % (xaxis, yaxis)
sys.exit()
MS = SpectralTypeRelations.MainSequence()
vband = np.arange(500, 600, 1) * units.nm.to(units.cm)
#Read in file/files WARNING! ASSUMES A CERTAIN FORMAT. MUST CHANGE THIS IF THE FORMAT CHANGES!
for infilename in infiles:
infile = open(infilename)
lines = infile.readlines()
infile.close()
print "Reading file %s" % infilename
current_temp = float(lines[4].split()[2])
fname = lines[4].split()[0].split("/")[-1]
starname = pyfits.getheader(fname)['object']
detections = 0.0
numsamples = 0.0
significance = []
starname_dict = {fname: starname}
spt_dict = {}
current_fname = fname
for iternum, line in enumerate(lines[4:]):
segments = line.split()
fname = segments[0].split("/")[-1]
T2 = float(segments[2])
if fname in starname_dict and T2 == current_temp and current_fname == fname:
# Do the time-consuming SpectralType calls
T1 = float(segments[1])
if T1 in spt_dict:
p_spectype = spt_dict[T1][0]
R1 = spt_dict[T1][1]
else:
p_spectype = MS.GetSpectralType(MS.Temperature, T1)
R1 = MS.Interpolate(MS.Radius, p_spectype)
spt_dict[T1] = (p_spectype, R1)
if T2 in spt_dict:
s_spectype = spt_dict[T2][0]
R2 = spt_dict[T2][1]
else:
s_spectype = MS.GetSpectralType(MS.Temperature, T2)
R2 = MS.Interpolate(MS.Radius, s_spectype)
spt_dict[T2] = (s_spectype, R2)
fluxratio = (PlotBlackbodies.Planck(vband, T1) / PlotBlackbodies.Planck(vband, T2)).mean() \
* (R1 / R2) ** 2
sec_mass = float(segments[3])
massratio = float(segments[4])
starname = starname_dict[fname]
if "y" in segments[6]:
detections += 1.
significance.append(float(segments[7]))
numsamples += 1.
else:
s_spt[starname].append(s_spectype)
s_temp[starname].append(current_temp)
p_spt[starname].append(p_spectype)
p_mass[starname].append(sec_mass / massratio)
s_mass[starname].append(sec_mass)
q[starname].append(massratio)
det_rate[starname].append(detections / numsamples)
sig[starname].append(np.mean(significance))
magdiff[starname].append(2.5 * np.log10(fluxratio))
# Reset things
current_temp = T2
current_fname = fname
fname = segments[0].split("/")[-1]
if fname in starname_dict:
starname = starname_dict[fname]
else:
starname = pyfits.getheader(fname)['object']
starname_dict[fname] = starname
numsamples = 0.0
detections = 0.0
significance = []
#Process this line for the next star
T1 = float(segments[1])
if T1 in spt_dict:
p_spectype = spt_dict[T1][0]
R1 = spt_dict[T1][1]
else:
p_spectype = MS.GetSpectralType(MS.Temperature, T1)
R1 = MS.Interpolate(MS.Radius, p_spectype)
spt_dict[T1] = (p_spectype, R1)
if T2 in spt_dict:
s_spectype = spt_dict[T2][0]
R2 = spt_dict[T2][1]
else:
s_spectype = MS.GetSpectralType(MS.Temperature, T2)
R2 = MS.Interpolate(MS.Radius, s_spectype)
spt_dict[T2] = (s_spectype, R2)
fluxratio = (PlotBlackbodies.Planck(vband, T1) / PlotBlackbodies.Planck(vband, T2)).mean() \
* (R1 / R2) ** 2
sec_mass = float(segments[3])
massratio = float(segments[4])
if "y" in segments[6]:
detections += 1.
significance.append(float(segments[7]))
numsamples += 1.
#plot
print "Plotting now"
spt_sorter = {"O": 1, "B": 2, "A": 3, "F": 4, "G": 5, "K": 6, "M": 7}
fcn = lambda s: (spt_sorter[itemgetter(0)(s)], itemgetter(1)(s))
#print sorted(s_spt.keys(), key=fcn)
#for starname in sorted(s_spt.keys(), key=fcn):
print sorted(s_spt.keys())
for starname in sorted(s_spt.keys()):
p_spectype = p_spt[starname]
x = namedict[xaxis][starname]
y = namedict[yaxis][starname]
if "SpectralType" in xaxis:
plt.plot(range(len(x)), y[::-1], linestyle=next(linecycler), linewidth=2,
label="%s (%s)" % (starname, p_spectype[0]))
plt.xticks(range(len(x)), x[::-1], size="small")
elif "SpectralType" in yaxis:
plt.plot(x[::-1], range(len(y)), linestyle=next(linecycler), linewidth=2,
label="%s (%s)" % (starname, p_spectype[0]))
plt.yticks(range(len(y)), y[::-1], size="small")
else:
plt.plot(x, y, linestyle=next(linecycler), linewidth=2, label="%s (%s)" % (starname, p_spectype[0]))
if "Magnitude" in xaxis:
ax = plt.gca()
ax.set_xlim(ax.get_xlim()[::-1])
elif "Magnitude" in yaxis:
ax = plt.gca()
ax.set_ylim(ax.get_ylim()[::-1])
leg = plt.legend(loc='best', fancybox=True)
leg.get_frame().set_alpha(0.4)
plt.xlabel(labeldict[xaxis], fontsize=15)
plt.ylabel(labeldict[yaxis], fontsize=15)
if "DetectionRate" in yaxis:
ax = plt.gca()
ax.set_ylim([-0.05, 1.05])
#plt.title("Sensitivity Analysis")
plt.grid(True)
plt.show()
if __name__ == "__main__":
if any(["new" in f for f in sys.argv[1:]]):
directory = "Sensitivity/"
allfiles = [f for f in os.listdir(directory) if (f.startswith("HIP") or f.startswith("HR"))]
prefixes = []
for fname in allfiles:
prefix = fname.split("_v")[0][:-6]
if prefix not in prefixes:
print "New prefix: %s" % prefix
prefixes.append(prefix)
for i, prefix in enumerate(prefixes):
MakeSummaryFile(directory, prefix, outfilename="%slogfile%i.txt" % (directory, i + 1))
MakePlot("%slogfile%i.txt" % (directory, i + 1))
else:
MakePlot("Sensitivity/logfile.dat")
| gpl-3.0 |
arjoly/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset  # randn needs int sizes
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
cosmoharrigan/opencog | scripts/make_benchmark_graphs.py | 56 | 3139 | #!/usr/bin/env python
# Requires matplotlib for graphing
# reads *_benchmark.csv files as output by atomspace_bm and turns them into
# graphs.
import csv
import numpy as np
import matplotlib.colors as colors
#import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#import matplotlib.font_manager as font_manager
import glob
import pdb
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
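# Usage sketch (hypothetical data): moving_average([1, 2, 3, 4, 5], 2) returns
# a 2-point simple rolling mean of the same length as the input, with the
# first n entries padded by the value at index n to hide edge artefacts.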
def graph_file(fn,delta_rss=True):
print "Graphing " + fn
records = csv.reader(open(fn,'rb'),delimiter=",")
sizes=[]; times=[]; times_seconds=[]; memories=[]
for row in records:
sizes.append(int(row[0]))
times.append(int(row[1]))
memories.append(int(row[2]))
times_seconds.append(float(row[3]))
left, width = 0.1, 0.8
rect1 = [left, 0.5, width, 0.4] #left, bottom, width, height
rect2 = [left, 0.1, width, 0.4]
fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6' # the axies background color
ax1 = fig.add_axes(rect1, axisbg=axescolor)
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax1.plot(sizes,times_seconds,color='black')
if len(times_seconds) > 1000:
        ax1.plot(sizes,moving_average(times_seconds,len(times_seconds) / 100),color='blue')
if delta_rss:
oldmemories = list(memories)
for i in range(1,len(memories)): memories[i] = oldmemories[i] - oldmemories[i-1]
ax2.plot(sizes,memories,color='black')
for label in ax1.get_xticklabels():
label.set_visible(False)
class MyLocator(mticker.MaxNLocator):
def __init__(self, *args, **kwargs):
mticker.MaxNLocator.__init__(self, *args, **kwargs)
def __call__(self, *args, **kwargs):
return mticker.MaxNLocator.__call__(self, *args, **kwargs)
# at most 7 ticks, pruning the upper and lower so they don't overlap
# with other ticks
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax1.yaxis.set_major_formatter(fmt)
ax2.yaxis.set_major_locator(MyLocator(7, prune='upper'))
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax2.yaxis.set_major_formatter(fmt)
ax2.yaxis.offsetText.set_visible(False)
fig.show()
size = int(fmt.orderOfMagnitude) / 3
labels = ["B","KB","MB","GB"]
label = labels[size]
labels = ["","(10s)","(100s)"]
label += " " + labels[int(fmt.orderOfMagnitude) % 3]
ax2.set_xlabel("AtomSpace Size")
ax2.set_ylabel("RSS " + label)
ax1.set_ylabel("Time (seconds)")
ax1.set_title(fn)
fig.show()
fig.savefig(fn+".png",format="png")
files_to_graph = glob.glob("*_benchmark.csv")
for fn in files_to_graph:
graph_file(fn);
| agpl-3.0 |
marcino239/gps | vdp_single_shooting.py | 1 | 2712 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import casadi as ca
import numpy as NP
import matplotlib.pyplot as plt
from operator import itemgetter
nk = 20 # Control discretization
tf = 10.0 # End time
# Declare variables (use scalar graph)
u = ca.SX.sym("u") # control
x = ca.SX.sym("x",2) # states
# ODE right hand side and quadratures
xdot = ca.vertcat( [(1 - x[1]*x[1])*x[0] - x[1] + u, x[0]] )
qdot = x[0]*x[0] + x[1]*x[1] + u*u
# DAE residual function
dae = ca.SXFunction("dae", ca.daeIn(x=x, p=u), ca.daeOut(ode=xdot, quad=qdot))
# Create an integrator
integrator = ca.Integrator("integrator", "cvodes", dae, {"tf":tf/nk})
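# Each call to this integrator advances the augmented (state, quadrature)
# system by one control interval of length tf/nk; the single-shooting loop
# below simply chains nk of these calls together.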
# All controls (use matrix graph)
x = ca.MX.sym("x",nk) # nk-by-1 symbolic variable
U = ca.vertsplit(x) # cheaper than x[0], x[1], ...
# The initial state (x_0=0, x_1=1)
X = ca.MX([0,1])
# Objective function
f = 0
# Build a graph of integrator calls
for k in range(nk):
X,QF = itemgetter('xf','qf')(integrator({'x0':X,'p':U[k]}))
f += QF
# Terminal constraints: x_0(T)=x_1(T)=0
g = X
# Allocate an NLP solver
opts = {'linear_solver': 'ma27'}
nlp = ca.MXFunction("nlp", ca.nlpIn(x=x), ca.nlpOut(f=f,g=g))
solver = ca.NlpSolver("solver", "ipopt", nlp, opts)
# Solve the problem
sol = solver({"lbx" : -0.75,
"ubx" : 1,
"x0" : 0,
"lbg" : 0,
"ubg" : 0})
# Retrieve the solution
u_opt = NP.array(sol["x"])
print( sol )
# Time grid
tgrid_x = NP.linspace(0,10,nk+1)
tgrid_u = NP.linspace(0,10,nk)
# Plot the results
plt.figure(1)
plt.clf()
plt.plot(tgrid_u,u_opt,'b-')
plt.title("Van der Pol optimization - single shooting")
plt.xlabel('time')
plt.legend(['u trajectory'])
plt.grid()
plt.show()
| gpl-3.0 |
glouppe/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
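# Effect in practice (mirrors the doctest below): wrapping a method with
# if_delegate_has_method(delegate='sub_est') makes hasattr(obj, 'predict')
# come out False whenever obj.sub_est lacks a 'predict' attribute, because
# this descriptor's attrgetter lookup raises AttributeError in __get__.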
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
flavour/porto | private/update_check/eden_update_check.py | 3 | 5382 | # -*- coding: utf-8 -*-
"""
Check whether the configuration is sufficient to run Eden.
"""
def update_check(environment):
# Get Web2py environment into our globals.
globals().update(**environment)
import os
app_path_parts = ["applications", request.application]
app_path = os.path.join(*app_path_parts)
# Fatal configuration errors.
errors = []
# Non-fatal warnings.
warnings = []
# -------------------------------------------------------------------------
# Check Python libraries
try:
import dateutil
except(ImportError):
errors.append("S3 unresolved dependency: dateutil required for Sahana to run")
try:
import lxml
except(ImportError):
errors.append("S3XML unresolved dependency: lxml required for Sahana to run")
try:
import shapely
except(ImportError):
warnings.append("S3GIS unresolved dependency: shapely required for GIS support")
try:
import xlwt
except(ImportError):
warnings.append("S3XLS unresolved dependency: xlwt required for XLS export")
try:
from PIL import Image
except(ImportError):
try:
import Image
except(ImportError):
warnings.append("S3PDF unresolved dependency: Python Imaging required for PDF export")
try:
import reportlab
except(ImportError):
warnings.append("S3PDF unresolved dependency: reportlab required for PDF export")
try:
import matplotlib
except(ImportError):
warnings.append("S3Chart unresolved dependency: matplotlib required for charting")
try:
import numpy
except(ImportError):
warnings.append("S3Cube unresolved dependency: numpy required for pivot table reports")
try:
import scipy
except(ImportError):
warnings.append("S3Cube unresolved dependency: scipy required for pivot table reports")
try:
import tweepy
except(ImportError):
warnings.append("S3Msg unresolved dependency: tweepy required for non-Tropo Twitter support")
# -------------------------------------------------------------------------
# Check Web2Py
# Currently, the minimum usable Web2py is determined by the existence of
# the global "current".
try:
from gluon import current
except ImportError:
errors.append(
"The installed version of Web2py is too old -- it does not define current."
"\nPlease upgrade Web2py to a more recent version.")
web2py_minimum_version = "Version 1.99.2 (2011-09-26 00:51:34) stable"
web2py_version_ok = True
try:
from gluon.fileutils import parse_version
except ImportError:
web2py_version_ok = False
if web2py_version_ok:
web2py_minimum_datetime = parse_version(web2py_minimum_version)[3]
web2py_installed_datetime = request.global_settings.web2py_version[3]
web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime
if not web2py_version_ok:
warnings.append(
"The installed version of Web2py is too old to provide the Scheduler,"
"\nso scheduled tasks will not be available. If you need scheduled tasks,"
"\nplease upgrade Web2py to at least version: %s" % \
web2py_minimum_version)
# -------------------------------------------------------------------------
# Copy in Templates
template_src = os.path.join(app_path, "deployment-templates")
template_dst = app_path
template_files = (
os.path.join("models", "000_config.py"),
# Deprecated by Scheduler
#"cron/crontab"
)
copied_from_template = []
for t in template_files:
src_path = os.path.join(template_src, t)
dst_path = os.path.join(template_dst, t)
try:
os.stat(dst_path)
except OSError:
# not found, copy from template
import shutil
shutil.copy(src_path, dst_path)
copied_from_template.append(t)
else:
# Found the file in the destination
# Check if it has been edited
import re
edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)"
edited_matcher = re.compile(edited_pattern).match
has_edited = False
with open(dst_path) as f:
for line in f:
edited_result = edited_matcher(line)
if edited_result:
has_edited = True
edited = edited_result.group(1)
break
if has_edited and (edited != "True"):
errors.append("Please edit %s before starting the system." % t)
# @ToDo: Check if it's up to date (i.e. a critical update requirement)
#version_pattern = r"VERSION_\w*\s*=\s*([0-9]+)"
#version_matcher = re.compile(version_pattern).match
#has_version = False
if copied_from_template:
errors.append(
"The following files were copied from templates and should be edited: %s" %
", ".join(copied_from_template))
return {"error_messages": errors, "warning_messages": warnings}
# =============================================================================
| mit |
mxjl620/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
ARM-software/CMSIS_5 | CMSIS/DSP/Examples/ARM/arm_svm_example/train.py | 2 | 3562 | from sklearn import svm
import random
import numpy as np
import math
from pylab import scatter,figure, clf, plot, xlabel, ylabel, xlim, ylim, title, grid, axes, show,semilogx, semilogy
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import BoundaryNorm,ListedColormap
# Generation of data to train the SVM classifier
# 100 vectors are generated. Vector have dimension 2 so can be represented as points
NBVECS = 100
VECDIM = 2
# A cluster of point is generated around the origin.
ballRadius = 0.5
x = ballRadius * np.random.randn(NBVECS,2)
# An annulus of point is generated around the central cluster.
angle = 2.0*math.pi * np.random.randn(1,NBVECS)
radius = 3.0+0.1*np.random.randn(1,NBVECS)
xa = np.zeros((NBVECS,2))
xa[:,0]=radius*np.cos(angle)
xa[:,1]=radius*np.sin(angle)
# All points are concatenated
X_train=np.concatenate((x,xa))
# First points (central cluster) are corresponding to class 0
# OTher points (annulus) are corresponding to class 1
Y_train=np.concatenate((np.zeros(NBVECS),np.ones(NBVECS)))
# Some bounds are computed for the graphical representation
x_min = X_train[:, 0].min()
x_max = X_train[:, 0].max()
y_min = X_train[:, 1].min()
y_max = X_train[:, 1].max()
# Training is done with a polynomial SVM
clf = svm.SVC(kernel='poly',gamma='auto', coef0=1.1)
clf.fit(X_train, Y_train)
# The classifier is tested with a first point inside first class
test1=np.array([0.4,0.1])
test1=test1.reshape(1,-1)
predicted1 = clf.predict(test1)
# Predicted class should be 0
print(predicted1)
# Second test is made with a point inside the second class (in the annulus)
test2=np.array([x_max,0]).reshape(1,-1)
predicted2 = clf.predict(test2)
# Predicted class should be 1
print(predicted2)
# The parameters of the trained classifier are printed to be used
# in CMSIS-DSP
supportShape = clf.support_vectors_.shape
nbSupportVectors=supportShape[0]
vectorDimensions=supportShape[1]
print("nbSupportVectors = %d" % nbSupportVectors)
print("vectorDimensions = %d" % vectorDimensions)
print("degree = %d" % clf.degree)
print("coef0 = %f" % clf.coef0)
print("gamma = %f" % clf._gamma)
print("intercept = %f" % clf.intercept_)
dualCoefs=clf.dual_coef_
dualCoefs=dualCoefs.reshape(nbSupportVectors)
supportVectors=clf.support_vectors_
supportVectors = supportVectors.reshape(nbSupportVectors*VECDIM)
print("Dual Coefs")
print(dualCoefs)
print("Support Vectors")
print(supportVectors)
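# The printed values above are intended to be copied into the CMSIS-DSP C
# example. As a hedged sketch only (check the CMSIS-DSP documentation for the
# exact signature), they would feed an initialisation call of roughly the form
#   arm_svm_polynomial_init_f32(&S, nbSupportVectors, vectorDimensions,
#                               intercept, dualCoefs, supportVectors,
#                               classes, degree, coef0, gamma);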
# Graphical representation to display the cluster of points
# and the SVM boundary
r=plt.figure()
plt.axis('off')
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
levels = MaxNLocator(nbins=15).tick_values(Z.min(), Z.max())
#cmap = plt.get_cmap('gray')
newcolors = ['#FFFFFF','#FFFFFF']
cmap = ListedColormap(newcolors)
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.pcolormesh(XX, YY, Z > 0, cmap=cmap,norm=norm)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
scatter(x[:,0],x[:,1],s=1.0,color='#FF6B00')
scatter(xa[:,0],xa[:,1],s=1.0,color='#95D600')
# The test points are displayed in red.
scatter(test1[:,0],test1[:,1],s=6.0,color='Red')
scatter(test2[:,0],test2[:,1],s=6.0,color='Red')
#r.savefig('fig1.jpeg')
#plt.close(r)
show()
#r=plt.figure()
#plt.axis('off')
#scatter(x[:,0],x[:,1],s=1.0,color='#FF6B00')
#scatter(xa[:,0],xa[:,1],s=1.0,color='#95D600')
#r.savefig('fig2.jpeg')
#plt.close(r) | apache-2.0 |
awickert/gFlex | input/Te_sample/makebreaks.py | 3 | 1743 | #! /usr/bin/python
import numpy as np
from matplotlib import pyplot as plt
TeScalar = 80000
#Te = TeScalar*np.ones((100,100))
Te = TeScalar*np.ones(2000)
# Make a discontinuous break: will be unstable, but a check of how to program
# this
def discontinuous(orientation,number,proportion):
output = np.zeros(Te.shape)
if orientation=='row':
output[number,:] = TeScalar
elif orientation=='column':
output[:,number] = TeScalar
return output
def slice2d(rowcol,proportion):
output = np.zeros(Te.shape)
for i in rowcol:
# "max" b/c I am thinking of whole-grid Gaussian in future
# max(output[i-1:i+1,:])
output[i-1:i+1,:] = proportion*TeScalar
output[:,i-1:i+1] = proportion*TeScalar
return output
#output = np.zeros(Te.shape)
#rowcol = [25,50,75]
#for i in rowcol:
# output[
#Te -= slice2d([25,50,75],.5)
#for i in 25,50,75:
# Te-=discontinuous('row',i,.99)
# Te-=discontinuous('column',i,.99)
# Make a Gaussian function in the middle of a grid with the shape of mine
def gaussian(rowcol,proportion):
# Only for square grids
g = np.zeros(Te.shape)
for i in rowcol:
a = TeScalar*proportion
b = i
c = 8.
x=np.arange(0,len(Te))
gaussian1d = a*np.exp((-(x-b)**2)/(2*c**2))
for i in range(len(Te)):
if len(Te.shape) == 2:
for j in range(len(Te)):
g[i,j] = max(g[i,j], gaussian1d[j])
g[j,i] = max(g[j,i], gaussian1d[j])
elif len(Te.shape) ==1:
g[i] = max(g[i], gaussian1d[i])
else:
print "Error!"
raise SystemExit
return g
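# Usage sketch: the call below, gaussian([200, 1000, 1800], .99), carves
# Gaussian notches of depth 0.99*TeScalar and width c = 8 cells, centred on
# the listed indices, out of the otherwise uniform Te profile.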
#Te -= gaussian([25,75],.8)
Te -= gaussian([200,1000,1800],.99)
if len(Te.shape) == 2:
plt.imshow(Te)
plt.colorbar()
elif len(Te.shape) == 1:
plt.plot(Te)
plt.show()
| gpl-3.0 |
Winand/pandas | pandas/tests/io/parser/header.py | 4 | 9794 | # -*- coding: utf-8 -*-
"""
Tests that the file header is properly handled or inferred
during parsing for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, lrange, u
class HeaderTests(object):
def test_read_with_bad_header(self):
errmsg = r"but only \d+ lines in file"
with tm.assert_raises_regex(ValueError, errmsg):
s = StringIO(',,')
self.read_csv(s, header=[10])
def test_bool_header_arg(self):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with pytest.raises(TypeError):
self.read_csv(StringIO(data), header=arg)
with pytest.raises(TypeError):
self.read_table(StringIO(data), header=arg)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
tm.assert_almost_equal(df_pref.values, expected)
tm.assert_index_equal(df_pref.columns,
Index(['Field0', 'Field1', 'Field2',
'Field3', 'Field4']))
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
assert list(df.columns) == ['A', 'B', 'C']
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# INVALID OPTIONS
# no as_recarray
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
pytest.raises(ValueError, self.read_csv,
StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True,
tupleize_cols=False)
# names
pytest.raises(ValueError, self.read_csv,
StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'],
tupleize_cols=False)
# usecols
pytest.raises(ValueError, self.read_csv,
StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'],
tupleize_cols=False)
# non-numeric index_col
pytest.raises(ValueError, self.read_csv,
StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples(
[('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array(
[[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
[u('r'), u('s'), u('t'),
u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array(
[[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
[u('r'), u('s'), u('t'),
u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array(
[[3, 4, 5, 6], [9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
[u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2], [0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_read_only_header_no_rows(self):
# See gh-7773
expected = DataFrame(columns=['a', 'b', 'c'])
df = self.read_csv(StringIO('a,b,c'))
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO('a,b,c'), index_col=False)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
tm.assert_index_equal(df_pref.columns,
Index(['X0', 'X1', 'X2', 'X3', 'X4']))
tm.assert_index_equal(df.columns, Index(lrange(5)))
tm.assert_index_equal(df2.columns, Index(names))
def test_non_int_header(self):
# GH 16338
msg = 'header must be integer or list of integers'
data = """1,2\n3,4"""
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), sep=',', header=['a', 'b'])
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), sep=',', header='string_header')
def test_singleton_header(self):
# See GH #7757
data = """a,b,c\n0,1,2\n1,2,3"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]})
tm.assert_frame_equal(df, expected)
| bsd-3-clause |
RafaelCosman/pybrain | pybrain/tools/plotting/multiline.py | 25 | 7884 | # $Id$
__author__ = 'Martin Felder and Frank Sehnke'
import math, imp
from matplotlib.lines import Line2D
from pylab import clf, plot, axes, show, xlabel, ylabel, title, savefig, ioff, draw_if_interactive
class MultilinePlotter:
""" Basic plotting class build on pylab
Implementing by instancing the class with the number of different plots to show.
Every plot has an id so adding data is done by addData(id, xValue, yValue) of the given data point
:todo: Add possibility to stick markers to the plots
:todo: Some error checking and documentation
:todo: Derive from this to make classes for trn/tst data plotting with different linestyles
"""
# some nice color definitions for graphs (from colorbrewer.org)
graphColor = [(0.894117647, 0.101960784, 0.109803922), \
(0.215686275, 0.494117647, 0.721568627), \
(0.301960784, 0.68627451, 0.290196078), \
(0.596078431, 0.305882353, 0.639215686), \
(1, 0.498039216, 0), \
(1, 1, 0.2), \
(0.650980392, 0.337254902, 0.156862745), \
(0.968627451, 0.505882353, 0.749019608), \
(0.6, 0.6, 0.6)]
def __init__(self, maxLines=1, autoscale=0.0, **kwargs):
"""
        :key maxLines: Number of plots to draw, and hence the maximum ID.
        :key autoscale: If set to a factor > 1, axes are automatically expanded whenever out-range data points are added
        :var indexList: The x-component of the data points
        :var dataList: The y-component of the data points"""
self.indexList = []
self.dataList = []
self.Lines = []
self.autoscale = autoscale
clf()
self.Axes = axes(**kwargs)
self.nbLines = 0
self.defaultLineStyle = {}
self._checkMaxId(maxLines - 1)
self.replot = True # is the plot still current?
self.currentID = None
self.offset = 0 # external references to IDs are modified by this
def setOffset(self, offs):
""" Set an offset that modifies all subsequent references to line IDs
:key offs: The desired offset """
self.offset = offs
#def createFigure(self, size=[12,8], interactive=True):
#""" initialize the graphics output window """
## FIXME: doesn work, because axes() in the constructor already creates a figure
#pylab.figure(figsize=size)
#if interactive: pylab.ion()
def _checkMaxId(self, id):
""" Appends additional lines as necessary
:key id: Lines up to this id are added automatically """
if id >= self.nbLines:
for i in range(self.nbLines, id + 1):
# create a new line with corresponding x/y data, and attach it to the plot
l = Line2D([], [], color=self.graphColor[i % 9], **self.defaultLineStyle)
self.Lines.append(l)
self.Axes.add_line(l)
self.indexList.append([])
self.dataList.append([])
self.nbLines = id + 1
def addData(self, id0, x, y):
""" The given data point or points is appended to the given line.
:key id0: The plot ID (counted from 0) the data point(s) belong to.
:key x: The x-component of the data point(s)
:key y: The y-component of the data point(s)"""
id = id0 + self.offset
if not (isinstance(x, list) | isinstance(x, tuple)):
self._checkMaxId(id)
self.indexList[id].append(x)
self.dataList[id].append(y)
self.currentID = id
else:
for i, xi in enumerate(x):
self.addData(id0, xi, y[i])
self.replot = True
def setData(self, id0, x, y):
""" Data series id0 is replaced by the given lists
:key id0: The plot ID (counted from 0) the data point(s) belong to.
:key x: The x-component of the data points
:key y: The y-component of the data points"""
id = id0 + self.offset
self._checkMaxId(id)
self.indexList[id] = x
self.dataList[id] = y
self.replot = True
def saveData(self, filename):
""" Writes the data series for all points to a file
:key filename: The name of the output file """
file = open(filename, "w")
for i in range(self.nbLines):
datLen = len(self.indexList[i])
for j in range(datLen):
file.write(repr(self.indexList[i][j]) + "\n")
file.write(repr(self.dataList[i][j]) + "\n")
file.close()
def setLabels(self, x='', y='', title=''):
""" set axis labels and title """
self.Axes.set_xlabel(x)
self.Axes.set_ylabel(y)
self.Axes.set_title(title)
def setLegend(self, *args, **kwargs):
""" hand parameters to the legend """
self.Axes.legend(*args, **kwargs)
def setLineStyle(self, id=None, **kwargs):
""" hand parameters to the specified line(s), and set them as default for new lines
:key id: The line or lines (list!) to be modified - defaults to last one added """
if id is None:
id = self.currentID
if isinstance(id, list) | isinstance(id, tuple):
# apply to specified list of lines
self._checkMaxId(max(id) + self.offset)
for i in id:
self.Lines[i + self.offset].set(**kwargs)
elif id >= 0:
# apply to selected line
self._checkMaxId(id + self.offset)
self.Lines[id + self.offset].set(**kwargs)
else:
# apply to all lines
for l in self.Lines:
l.set(**kwargs)
# set as new default linestyle
if 'color' in kwargs:
kwargs.popitem('color')
self.defaultLineStyle = kwargs
def update(self):
""" Updates the current plot, if necessary """
if not self.replot:
return
xr = list(self.Axes.get_xlim())
yr = list(self.Axes.get_ylim())
for i in range(self.nbLines):
self.Lines[i].set_data(self.indexList[i], self.dataList[i])
if self.autoscale > 1.0:
if self.indexList[i][0] < xr[0]:
xr[0] = self.indexList[i][0]
ymn = min(self.dataList[i])
if ymn < yr[0]:
yr[0] = ymn
while self.indexList[i][-1] > xr[1]:
xr[1] = (xr[1] - xr[0]) * self.autoscale + xr[0]
ymx = max(self.dataList[i])
while ymx > yr[1]:
yr[1] = (yr[1] - yr[0]) * self.autoscale + yr[0]
if self.autoscale > 1.0:
self.Axes.set_xlim(tuple(xr))
self.Axes.set_ylim(tuple(yr))
#self.Axes.draw()
#pylab.show()
draw_if_interactive()
self.replot = False
def show(self, xLabel='', yLabel='', Title='', popup=False, imgfile=None):
""" Plots the data internally and saves an image of it to the plotting directory.
        :key Title: The title of the plot.
        :key xLabel: The label for the x-axis
        :key yLabel: The label for the y-axis
:key popup: also produce a popup window with the image?"""
clf()
for i in range(self.nbLines):
plot(self.indexList[i], self.dataList[i])
xlabel(xLabel)
ylabel(yLabel)
title(Title)
if imgfile == None:
imgfile = imp.find_module('pybrain')[1] + "/tools/plotting/plot.png"
savefig(imgfile)
if popup:
ioff()
show()
"""Small example to demonstrate how the plot class can be used"""
if __name__ == "__main__":
pbplot = MultilinePlotter(7)
for i in range(400000):
if i / 100000 == i / 100000.0:
for j in range(7):
pbplot.addData(j, i, math.sqrt(float(i * (j + 1))))
pbplot.show("WorldInteractions", "Fitness", "Example Plot", True)
| bsd-3-clause |
anurag313/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroids for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class are minimized.
If the "manhattan" metric is provided, this centroid is the median and
for all other metrics, the centroid is now set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
# If X is sparse and the metric is "manhattan", store it in a csc
# format is easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
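# Hedged illustration (not part of the estimator above): the shrink_threshold
# step soft-thresholds each centroid's standardized deviation from the overall
# mean, zeroing any component whose magnitude falls below the threshold. A
# minimal NumPy sketch of that rule, using a made-up deviation vector:
if __name__ == "__main__":
    import numpy as np
    dev = np.array([2.5, -0.4, 0.9])
    thr = 1.0
    shrunk = np.sign(dev) * np.maximum(np.abs(dev) - thr, 0.0)
    # The second and third components vanish because |dev| < thr there.
    print(shrunk)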
| bsd-3-clause |
CVML/hyperopt | hyperopt/tests/test_pchoice.py | 6 | 4524 | from functools import partial
import numpy as np
import unittest
from sklearn import datasets
from hyperopt import hp, Trials, fmin, tpe, rand
import hyperopt.pyll.stochastic
class TestPChoice(unittest.TestCase):
def test_basic(self):
space = hp.pchoice('naive_type',
[(.14, 'gaussian'),
(.02, 'multinomial'),
(.84, 'bernoulli')])
a, b, c = 0, 0, 0
rng = np.random.RandomState(123)
for i in range(0, 1000):
nesto = hyperopt.pyll.stochastic.sample(space, rng=rng)
if nesto == 'gaussian':
a += 1
elif nesto == 'multinomial':
b += 1
elif nesto == 'bernoulli':
c += 1
print(a, b, c)
assert a + b + c == 1000
assert 120 < a < 160
assert 0 < b < 40
assert 800 < c < 900
def test_basic2(self):
space = hp.choice('normal_choice', [
hp.pchoice('fsd',
[(.1, 'first'),
(.8, 'second'),
(.1, 2)]),
hp.choice('something_else', [10, 20])
])
a, b, c = 0, 0, 0
rng = np.random.RandomState(123)
for i in range(0, 1000):
nesto = hyperopt.pyll.stochastic.sample(space, rng=rng)
if nesto == 'first':
a += 1
elif nesto == 'second':
b += 1
elif nesto == 2:
c += 1
elif nesto in (10, 20):
pass
else:
assert 0, nesto
print(a, b, c)
assert b > 2 * a
assert b > 2 * c
def test_basic3(self):
space = hp.pchoice('something', [
(.2, hp.pchoice('number', [(.8, 2), (.2, 1)])),
(.8, hp.pchoice('number1', [(.7, 5), (.3, 6)]))
])
a, b, c, d = 0, 0, 0, 0
rng = np.random.RandomState(123)
for i in range(0, 2000):
nesto = hyperopt.pyll.stochastic.sample(space, rng=rng)
if nesto == 2:
a += 1
elif nesto == 1:
b += 1
elif nesto == 5:
c += 1
elif nesto == 6:
d += 1
else:
assert 0, nesto
print(a, b, c, d)
assert a + b + c + d == 2000
assert 300 < a + b < 500
assert 1500 < c + d < 1700
assert a * .3 > b # a * 1.2 > 4 * b
assert c * 3 * 1.2 > d * 7
class TestSimpleFMin(unittest.TestCase):
# test that a space with a pchoice in it is
# (a) accepted by various algos and
# (b) handled correctly.
#
def setUp(self):
self.space = hp.pchoice('a', [
(.1, 0),
(.2, 1),
(.3, 2),
(.4, 3)])
self.trials = Trials()
def objective(self, a):
return [1, 1, 1, 0 ][a]
def test_random(self):
# test that a space with a pchoice in it is
# (a) accepted by rand.suggest and
# (b) handled correctly.
N = 50
fmin(self.objective,
space=self.space,
trials=self.trials,
algo=rand.suggest,
max_evals=N)
a_vals = [t['misc']['vals']['a'][0] for t in self.trials.trials]
counts = np.bincount(a_vals)
print(counts)
assert counts[3] > N * .35
assert counts[3] < N * .60
def test_tpe(self):
N = 100
fmin(self.objective,
space=self.space,
trials=self.trials,
algo=partial(tpe.suggest, n_startup_jobs=10),
max_evals=N)
a_vals = [t['misc']['vals']['a'][0] for t in self.trials.trials]
counts = np.bincount(a_vals)
print(counts)
assert counts[3] > N * .6
def test_bug1_rand():
space = hp.choice('preprocess_choice', [
{'pwhiten': hp.pchoice('whiten_randomPCA',
[(.3, False), (.7, True)])},
{'palgo': False},
{'pthree': 7}])
best = fmin(fn=lambda x: 1,
space=space,
algo=rand.suggest,
max_evals=50)
def test_bug1_tpe():
space = hp.choice('preprocess_choice', [
{'pwhiten': hp.pchoice('whiten_randomPCA',
[(.3, False), (.7, True)])},
{'palgo': False},
{'pthree': 7}])
best = fmin(fn=lambda x: 1,
space=space,
algo=tpe.suggest,
max_evals=50)
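# Hedged sketch (not part of the original test suite): hp.pchoice attaches an
# explicit probability to each option, so sampling frequencies should roughly
# track the weights. A quick standalone check under that assumption:
if __name__ == "__main__":
    demo_space = hp.pchoice('demo', [(.2, 'a'), (.8, 'b')])
    demo_rng = np.random.RandomState(0)
    draws = [hyperopt.pyll.stochastic.sample(demo_space, rng=demo_rng)
             for _ in range(1000)]
    print(draws.count('a'), draws.count('b'))  # expect roughly 200 / 800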
| bsd-3-clause |
MicrosoftGenomics/PySnpTools | pysnptools/pstreader/_subset.py | 1 | 6026 | import numpy as np
import subprocess, sys, os.path
from itertools import *
import pandas as pd
import logging
from pysnptools.pstreader import PstReader
from pysnptools.pstreader import PstData
#!!Should handle negative indices and arrays of indices, but doesn't
class _PstSubset(PstReader):
def __init__(self, internal, row_indexer, col_indexer):
'''
an indexer can be:
an integer i (same as [i])
a slice
a list of integers
a list of booleans
'''
super(_PstSubset, self).__init__()
self._ran_once = False
self._internal = internal
self._row_indexer = PstReader._make_sparray_or_slice(row_indexer)
self._col_indexer = PstReader._make_sparray_or_slice(col_indexer)
def __repr__(self):
s = "{0}[{1},{2}]".format(self._internal,_PstSubset.static_nice_string(self,self._row_indexer),_PstSubset.static_nice_string(self,self._col_indexer))
return s
def copyinputs(self, copier):
self._internal.copyinputs(copier)
@property
def row(self):
self.run_once()
return self._row
@property
def col(self):
self.run_once()
return self._col
@property
def row_property(self):
self.run_once()
return self._row_property
@property
def col_property(self):
self.run_once()
return self._col_property
# Most _read implementations support only index lists or None, but this one supports slices, too.
_read_accepts_slices = True
def _read(self, row_indexer, col_indexer, order, dtype, force_python_only, view_ok):
self.run_once()
if hasattr(self._internal,'_read_accepts_slices'):
assert self._internal._read_accepts_slices, "If an object has the _read_accepts_slices attribute, it must have value 'True'"
composed_row_index_or_none = _PstSubset.compose_indexer_with_indexer(self._internal.row_count, self._row_indexer, self.row_count, row_indexer)
composed_col_index_or_none = _PstSubset.compose_indexer_with_indexer(self._internal.col_count, self._col_indexer, self.col_count, col_indexer)
val = self._internal._read(composed_row_index_or_none, composed_col_index_or_none, order, dtype, force_python_only, view_ok)
return val
else:
row_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.row_count, row_indexer)
composed_row_index_or_none = _PstSubset.compose_indexer_with_index_or_none(self._internal.row_count, self._row_indexer, self.row_count, row_index_or_none)
col_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.col_count, col_indexer)
composed_col_index_or_none = _PstSubset.compose_indexer_with_index_or_none(self._internal.col_count, self._col_indexer, self.col_count, col_index_or_none)
val = self._internal._read(composed_row_index_or_none, composed_col_index_or_none, order, dtype, force_python_only, view_ok)
return val
def run_once(self):
if self._ran_once:
return
self._ran_once = True
self._row = self._internal.row[self._row_indexer]
self._col = self._internal.col[self._col_indexer]
if self._row.dtype == self._col.dtype and np.array_equal(self._row,self._col): #When an object is square, keep the row and col the same object.
self._col = self._row
self._row_property = self._internal.row_property[self._row_indexer]
self._col_property = self._internal.col_property[self._col_indexer]
_slice_format = {(False,False,False):":",
(False,False,True):"::{2}",
(False,True,False):":{1}",
(False,True,True):":{1}:{2}",
(True,False,False):"{0}:",
(True,False,True):"{0}::{2}",
(True,True,False):"{0}:{1}",
(True,True,True):"{0}:{1}:{2}"}
@staticmethod
def static_nice_string(self, some_slice):
if isinstance(some_slice,slice):
return _PstSubset._slice_format[(some_slice.start is not None, some_slice.stop is not None, some_slice.step is not None)].format(some_slice.start, some_slice.stop, some_slice.step)
elif len(some_slice) == 1:
return str(some_slice[0])
elif len(some_slice) < 10:
return "[{0}]".format(",".join([str(i) for i in some_slice]))
else:
return "[{0},...]".format(",".join([str(i) for i in some_slice[:10]]))
#!!Commented out because it doesn't guarantee that the shortcut will return data with the dtype and order requested.
# Also, it didn't handle stacked do-nothing subsets
#def read(self, order='F', dtype=np.float64, force_python_only=False, view_ok=False):
# if view_ok and hasattr(self._internal,"val") and _PstSubset._is_all_slice(self._row_indexer) and _PstSubset._is_all_slice(self._col_indexer):
# return self._internal
# else:
# return PstReader.read(self, order, dtype, force_python_only, view_ok)
@staticmethod
def compose_indexer_with_index_or_none(countA, indexerA, countB, index_or_noneB):
if _PstSubset._is_all_slice(indexerA):
return index_or_noneB
indexA = PstReader._make_sparray_from_sparray_or_slice(countA, indexerA)
if _PstSubset._is_all_slice(index_or_noneB):
return indexA
indexAB = indexA[index_or_noneB]
return indexAB
@staticmethod
def compose_indexer_with_indexer(countA, indexerA, countB, indexerB):
if _PstSubset._is_all_slice(indexerA):
return indexerB
if _PstSubset._is_all_slice(indexerB):
return indexerA
indexA = PstReader._make_sparray_from_sparray_or_slice(countA, indexerA)
indexB = PstReader._make_sparray_from_sparray_or_slice(countB, indexerB)
indexAB = indexA[indexB]
return indexAB
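# Hedged illustration (not part of the class above): composing two index
# arrays is just fancy-indexing one with the other, which is what
# compose_indexer_with_indexer does once any slices are expanded.
if __name__ == "__main__":
    indexA = np.array([10, 11, 12, 13, 14])  # rows kept by the outer subset
    indexB = np.array([0, 2, 4])             # rows requested from that subset
    print(indexA[indexB])                    # -> [10 12 14]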
| apache-2.0 |
matthew-tucker/mne-python | examples/time_frequency/plot_compute_raw_data_spectrum.py | 16 | 2573 | """
==================================================
Compute the power spectral density of raw data
==================================================
This script shows how to compute the power spectral density (PSD)
of measurements on a raw dataset. It also shows the effect of applying SSP
to the data to reduce ECG and EOG artifacts.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog_proj.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)
tmin, tmax = 0, 60 # use the first 60s of data
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
plt.ion()
# Let's first check out all channel types
raw.plot_psd(area_mode='range', tmax=10.0)
# Now let's focus on a smaller subset:
# Pick MEG magnetometers in the Left-temporal region
selection = read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's just look at the first few channels for demonstration purposes
picks = picks[:4]
plt.figure()
ax = plt.axes()
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks)
# And now do the same with SSP applied
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks)
# And now do the same with SSP + notch filtering
raw.notch_filter(np.arange(60, 241, 60), picks=picks, n_jobs=1)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks)
ax.set_title('Four left-temporal magnetometers')
plt.legend(['Without SSP', 'With SSP', 'SSP + Notch'])
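# As a cross-check (not part of the original example), a comparable Welch PSD
# can be computed directly with SciPy. This is only a sketch; it reuses the
# ``raw``, ``picks``, ``tmin``, ``tmax`` and ``n_fft`` variables defined above.
from scipy import signal
start, stop = raw.time_as_index([tmin, tmax])
data, times = raw[picks, start:stop]
freqs, psd = signal.welch(data, fs=raw.info['sfreq'], nperseg=n_fft)
# psd has shape (n_picks, n_freqs), one spectrum per selected channel.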
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/metrics/tests/test_classification.py | 15 | 49665 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, [(), ()]), 0)
assert_equal(accuracy_score(y1, y2, normalize=False), 1)
assert_equal(accuracy_score(y1, y1, normalize=False), 2)
assert_equal(accuracy_score(y2, y2, normalize=False), 2)
assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
# separated from the 1 values, so it appears that we've
# correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
# same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[4, 1, 2, 3], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
@ignore_warnings # sequence of sequences is deprecated
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
make_ml = make_multilabel_classification
_, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
lb = MultiLabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, [(), ()]), 1)
assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, [(), ()]), 0.75)
assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, [(), ()]), 0)
# |y3 inter y4 | = [0, 1, 1]
# |y3 union y4 | = [2, 1, 3]
y3 = [(0,), (1,), (3,)]
y4 = [(4,), (4,), (5, 6)]
assert_almost_equal(jaccard_similarity_score(y3, y4), 0)
# |y5 inter y6 | = [0, 1, 1]
# |y5 union y6 | = [2, 1, 3]
y5 = [(0,), (1,), (2, 3)]
y6 = [(1,), (1,), (2, 0)]
assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true_ll = [(0,), (1,), (2, 3)]
y_pred_ll = [(1,), (1,), (2, 0)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
#tp = [0, 1, 1, 0]
#fn = [1, 0, 0, 1]
#fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true_ll = [(1,), (2,), (2, 3)]
y_pred_ll = [(4,), (4,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(1, 5)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true_ll = [(1,), (0,), (2, 1,)]
y_pred_ll = [tuple(), (3,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
@ignore_warnings # sequence of sequences is deprecated
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
SEQ = 'multilabel-sequences'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(SEQ, [[2, 3], [1], [3]]),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(SEQ, SEQ): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(IND, SEQ): None,
(MC, SEQ): None,
(BIN, SEQ): None,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(SEQ, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(SEQ, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(SEQ, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, SEQ, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
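# Hedged illustration (not part of the original tests): with labels in
# {-1, +1}, the binary hinge loss averages max(0, 1 - y * decision), which is
# where the 1.2 / 4 above comes from.
if __name__ == "__main__":
    margins = 1 - np.array([-1, 1, 1, -1]) * np.array([-8.5, 0.5, 1.5, -0.3])
    assert np.isclose(np.mean(np.clip(margins, 0, None)), 1.2 / 4)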
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
# decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
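# Hedged worked example (illustration only): the Brier score is simply the
# mean squared difference between predicted probabilities and the binary
# outcomes, which matches ``true_score`` computed above.
if __name__ == "__main__":
    y_t = np.array([0, 1, 1, 0, 1, 1])
    y_p = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
    assert np.isclose(np.mean((y_t - y_p) ** 2), brier_score_loss(y_t, y_p))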
| bsd-3-clause |
aleksandr-bakanov/astropy | examples/coordinates/plot_sgr-coordinate-frame.py | 3 | 10549 | # -*- coding: utf-8 -*-
"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy-coordinates-design` and the
docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we
will define a coordinate system defined by the plane of orbit of the Sagittarius
Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr
coordinate system is often referred to in terms of two angular coordinates,
:math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page http://www.stsci.edu/~dlaw/Sgr/
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
"""
##############################################################################
# Set up numpy and matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
http://adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
http://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `~astropy.coordinates.Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `~astropy.coordinates.Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta`` must
also be given).
radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
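##############################################################################
# As a quick illustrative check (an addition to the original example): the
# combined matrix is orthogonal, so its transpose really is its inverse and
# the two registered transformations are mutually consistent:
print(np.allclose(SGR_MATRIX @ matrix_transpose(SGR_MATRIX), np.eye(3)))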
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.ICRS(280.161732*u.degree, 11.91934*u.degree)
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(r"$\mu_\Lambda \, \cos B$ [{0}]"
.format(sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')))
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(r"$\mu_\alpha \, \cos\delta$ [{0}]"
.format(icrs.pm_ra_cosdec.unit.to_string('latex_inline')))
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(r"$\mu_\delta$ [{0}]"
.format(icrs.pm_dec.unit.to_string('latex_inline')))
plt.show()
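##############################################################################
# As a final sanity check (not part of the original example, and using only
# the frames defined above), a coordinate transformed from ``Sagittarius`` to
# Galactic and back should be recovered to within floating-point precision:
check = Sagittarius(Lambda=45*u.degree, Beta=10*u.degree)
roundtrip = check.transform_to(coord.Galactic).transform_to(Sagittarius)
print(np.allclose(check.Lambda.degree, roundtrip.Lambda.degree),
      np.allclose(check.Beta.degree, roundtrip.Beta.degree))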
| bsd-3-clause |
google/matched_markets | matched_markets/methodology/tbrmmdata.py | 1 | 8887 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TBR Matched Markets: TBRMMData class.
"""
from typing import List, Optional, Set, Text, Tuple, Union
from matched_markets.methodology import geoeligibility
import numpy as np
import pandas as pd
Array = np.ndarray
GeoID = Text
GeoIndex = int
GeoRef = Union[GeoID, GeoIndex]
OrderedGeos = Union[List[GeoRef], Tuple[GeoRef]]
GeoIndexSet = Set[GeoIndex]
Vector = List[float]
GeoEligibility = geoeligibility.GeoEligibility
GeoAssignments = geoeligibility.GeoAssignments
class TBRMMData:
"""Geo time series data for TBR Matched Markets.
  Transforms the geo time series data into a canonical format for easier
  manipulation; calculates the average aggregate share of each geo; and derives
  the list of geos that appear both in the data and in the geo eligibility
  matrix and can be assigned to treatment or control ('assignable' geos).
  The set of assignable geos is the intersection of the geos in the
  GeoEligibility object and those in the data, minus those that are specified
  to be excluded.
  If the GeoEligibility object is not specified at initialization, a default
  GeoEligibility object is created, with no restrictions on geo assignment.
  Ensures that the set of geos in the geo eligibility object is a subset of
  those in the data, and that any geo that must be included in the design is
  also present in the data. The geo eligibility object may, however, refer to
  only a subset of the geos in the data.
Attributes:
df: Data in canonical format (geos in rows, dates in columns).
geo_eligibility: The GeoEligibility object.
geo_share: Aggregate share of each geo in terms of the response volume.
geos_in_data: A complete set of geos that are found in the data set.
assignable: Set of geo IDs that are assignable to control and/or treatment.
geo_index: A user-defined subset of the assignable geo IDs.
geo_assignments: Geo assignments of geos specified by 'geo_index'.
"""
# Minimum correlation bound for identifying 'noisy geos'.
_min_corr_bound = 0.5
df: pd.DataFrame
geo_eligibility: GeoEligibility
geo_share: pd.Series = None
geos_in_data: Set[GeoID] = None
assignable: Set[GeoID] = None
geo_assignments: GeoAssignments = None
_geo_index: List[GeoRef] = None # Storage for 'geo_index'.
_array: Optional[Array] = None # Time series of the geos by geo_index.
_array_geo_share: Optional[Array] = None # Subset of geo_share[geo_index].
def __init__(
self,
df: pd.DataFrame,
response_column: str,
geo_eligibility: Optional[GeoEligibility] = None):
"""Initialize and validate a TBRMMData object.
1. Pivots the data frame 'df' such that geos are in the rows and dates in
columns.
2. Calculates mean market share for each geo.
3. Creates a default GeoEligibility object if omitted.
Args:
df: (pandas.DataFrame) DataFrame with mandatory columns 'geo', 'date' and
the column representing the response.
response_column: String. Name of the response metric column.
geo_eligibility: a GeoEligibility object, or if not specified, None (in
case a default GeoEligibility object will be constructed).
"""
df = df.copy()
required_columns = {'date', 'geo', response_column}
missing_columns = required_columns - set(df.columns)
if missing_columns:
raise ValueError('Missing column(s): ' + ', '.join(missing_columns))
# Ensure that the geo column is a string.
df.geo = df.geo.astype('str')
# Transform into a canonical format with geos in rows, dates in columns,
# geos (rows) sorted with those with the largest volume first so that
# the largest geos are iterated first (those with the smallest row index).
df = df.pivot_table(values=response_column, index='geo', columns='date',
fill_value=0)
# Calculate the average 'market share' based on all data.
geo_means = df.mean(axis=1).sort_values(ascending=False)
geo_share = geo_means / sum(geo_means)
geos_in_data = set(geo_means.index)
# For convenience sort the geos (rows) in descending order.
self.df = df.loc[list(geo_means.index)]
self.geo_share = geo_share
self.geos_in_data = geos_in_data
if geo_eligibility is None:
# Default object will have all geos with all possible assignment
# possibilities.
gelig_dict = {'geo': list(geo_means.index),
'control': 1,
'treatment': 1,
'exclude': 1}
gelig_df = pd.DataFrame(gelig_dict)
geo_eligibility = GeoEligibility(gelig_df)
geo_assignments = geo_eligibility.get_eligible_assignments()
# Ensure that the geo eligibility object only has geos that are
# in the data.
common_geos = geos_in_data & geo_assignments.all
if common_geos != geo_assignments.all:
# Ensure that geos that cannot be excluded are not missing.
geos_cannot_be_excluded = geo_assignments.all - geo_assignments.x
geos_missing = geos_cannot_be_excluded - geos_in_data
if geos_missing:
raise ValueError('Required geos {} were not found '
'in the data'.format(sorted(geos_missing)))
df_elig = geo_eligibility.data.loc[common_geos]
geo_eligibility = GeoEligibility(df_elig)
geo_assignments = geo_eligibility.get_eligible_assignments()
assignable = geo_assignments.all - geo_assignments.x_fixed
self.assignable = assignable # pytype: disable=annotation-type-mismatch
self.geo_eligibility = geo_eligibility
@property
def geo_index(self) -> OrderedGeos:
return self._geo_index
@geo_index.setter
def geo_index(self, geos: OrderedGeos):
"""Fix the set of geos that will be used.
1. Creates a subset of the DataFrame attribute .df as a Numpy array whose
rows represent the time series of each geo in the argument 'geos'. This is
a convenient and computationally fast form to produce aggregate time series
of the data.
2. Creates a GeoAssignment object whose values refer to the rows of the
array (values are integers 0 .. number of geos minus 1). Using row numbers
for the geo IDs will be faster than using indices.
Args:
geos: Geo IDs for the subset of the geos that will be included in the
Matched Markets analysis.
"""
missing_geos = set(geos) - self.assignable
if missing_geos:
missing_geos = sorted(list(missing_geos))
raise ValueError('Unassignable geo(s): ' + ', '.join(missing_geos))
self.geo_assignments = self.geo_eligibility.get_eligible_assignments(
geos,
indices=True)
self._geo_index = geos
self._array = self.df.loc[geos].to_numpy()
self._array_geo_share = np.array(self.geo_share[geos])
def aggregate_time_series(self, geo_indices: GeoIndexSet) -> Vector:
"""Return the aggregate the time series over a set of chosen geos.
Args:
geo_indices: Set of geo indices referring to the geos in self.geo_index (0
.. number of geos in geo_index - 1).
Returns:
A time series representing the sum of the geos indicated by
'geo_indices'.
"""
return self._array[list(geo_indices)].sum(axis=0)
def aggregate_geo_share(self, geo_indices: GeoIndexSet) -> float:
"""Share of the given geos' response as percentage of the total.
Args:
geo_indices: Set of geo indices referring to the geos in self.geo_index (0
.. number of geos in geo_index - 1).
Returns:
Aggregate share of geos indicated by 'geo_indices'.
"""
return self._array_geo_share[list(geo_indices)].sum()
@property
def leave_one_out_correlations(self) -> pd.Series:
"""Correlations between each geo and the aggregate of the rest of the geos.
Returns:
A pd.Series of correlations indexed by 'geo'.
"""
aggregate_ts = self.df.sum(axis=0)
def corr_leave_one_out(x):
return np.corrcoef(x, aggregate_ts - x)[0, 1]
return self.df.apply(corr_leave_one_out, axis=1)
@property
def noisy_geos(self) -> Set[GeoID]:
"""Returns geos that have a low or negative correlation with the rest."""
correlations = self.leave_one_out_correlations
return set(correlations[correlations <= self._min_corr_bound].index)
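# ------------------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the library API).
# The geo IDs, dates and the 'sales' response column below are made-up values
# that merely satisfy the input requirements documented in the class docstring.
if __name__ == '__main__':
  example_df = pd.DataFrame({
      'geo': ['1', '1', '2', '2', '3', '3'],
      'date': ['2020-01-01', '2020-01-02'] * 3,
      'sales': [10.0, 12.0, 5.0, 6.0, 1.0, 1.5],
  })
  example_data = TBRMMData(example_df, response_column='sales')
  example_data.geo_index = ['1', '2', '3']  # fix the geos used in the design
  # Aggregate time series of the geos at row indices 0 and 1 of geo_index.
  print(example_data.aggregate_time_series({0, 1}))
  # Leave-one-out correlations used to flag 'noisy' geos.
  print(example_data.leave_one_out_correlations)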
| apache-2.0 |
liuwenf/moose | python/peacock/tests/postprocessor_tab/test_VectorPostprocessorSelectPlugin.py | 5 | 4183 | #!/usr/bin/env python
import sys
import os
import unittest
import shutil
import time
import glob
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.plugins.PostprocessorSelectPlugin import main
from peacock.utils import Testing
import mooseutils
class TestVectorPostprocessorSelectPlugin(Testing.PeacockImageTestCase):
"""
    Test class for the PostprocessorSelectPlugin, which toggles vector postprocessor lines.
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
def setUp(self):
"""
Creates the GUI containing the ArtistGroupWidget and the matplotlib figure axes.
"""
# Filenames to load
self._filename = '{}_test_*.csv'.format(self.__class__.__name__)
self._filename2 = '{}_test2_*.csv'.format(self.__class__.__name__)
# Read the data
filenames = [self._filename, self._filename2]
self._control, self._widget, self._window = main(filenames, mooseutils.VectorPostprocessorReader)
def copyfiles(self, partial=False):
"""
Move files into the temporary location.
"""
if partial:
shutil.copyfile('../input/vpp_000.csv', '{}_test_000.csv'.format(self.__class__.__name__))
shutil.copyfile('../input/vpp_001.csv', '{}_test_001.csv'.format(self.__class__.__name__))
else:
for i in [0,1,2,4]:
shutil.copyfile('../input/vpp_00{}.csv'.format(i), '{}_test_00{}.csv'.format(self.__class__.__name__, i))
for i in [0,1,3,5,7,9]:
shutil.copyfile('../input/vpp2_000{}.csv'.format(i), '{}_test2_000{}.csv'.format(self.__class__.__name__, i))
for data in self._widget._data:
data.load()
def tearDown(self):
"""
Remove temporary files.
"""
for filename in glob.glob(self._filename):
os.remove(filename)
for filename in glob.glob(self._filename2):
os.remove(filename)
def testEmpty(self):
"""
Test that an empty plot is possible.
"""
self.assertImage('testEmpty.png')
def testSelect(self):
"""
Test that plotting from multiple files works.
"""
self.copyfiles()
vars = ['y', 't*x**2']
for i in range(len(vars)):
self._control._groups[i]._toggles[vars[i]].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._groups[i]._toggles[vars[i]].CheckBox.clicked.emit(True)
self.assertImage('testSelect.png')
def testUpdateData(self):
"""
Test that a postprocessor data updates when file is changed.
"""
self.copyfiles(partial=True)
var = 'y'
self._control._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._groups[0]._toggles[var].CheckBox.clicked.emit(True)
self.assertImage('testUpdateData0.png')
# Reload the data (this would be done via a Timer)
time.sleep(1) # need to wait a bit for the modified time to change
self.copyfiles()
self.assertImage('testUpdateData1.png')
def testRepr(self):
"""
Test python scripting.
"""
self.copyfiles()
vars = ['y', 't*x**2']
for i in range(len(vars)):
self._control._groups[i]._toggles[vars[i]].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._groups[i]._toggles[vars[i]].CheckBox.clicked.emit(True)
output, imports = self._control.repr()
self.assertIn("data = mooseutils.VectorPostprocessorReader('TestVectorPostprocessorSelectPlugin_test_*.csv')", output)
self.assertIn("x = data('index (Peacock)')", output)
self.assertIn("y = data('y')", output)
self.assertIn("axes0.plot(x, y, marker='', linewidth=1, color=[0.698, 0.875, 0.541, 1.0], markersize=1, linestyle='-', label='y')", output)
self.assertIn("data = mooseutils.VectorPostprocessorReader('TestVectorPostprocessorSelectPlugin_test2_*.csv')", output)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 |
xhochy/arrow | python/pyarrow/tests/test_types.py | 1 | 29881 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from collections.abc import Iterator
import datetime
import sys
import pickle
import pytest
import pytz
import hypothesis as h
import hypothesis.strategies as st
import hypothesis.extra.pytz as tzst
import weakref
import numpy as np
import pyarrow as pa
import pyarrow.types as types
import pyarrow.tests.strategies as past
def get_many_types():
    # returning them from a function is required because the pa.dictionary
    # type holds a pyarrow array, and test_array.py::test_total_bytes_allocated
    # checks that the default memory pool has zero allocated bytes
return (
pa.null(),
pa.bool_(),
pa.int32(),
pa.time32('s'),
pa.time64('us'),
pa.date32(),
pa.timestamp('us'),
pa.timestamp('us', tz='UTC'),
pa.timestamp('us', tz='Europe/Paris'),
pa.duration('s'),
pa.float16(),
pa.float32(),
pa.float64(),
pa.decimal128(19, 4),
pa.decimal256(76, 38),
pa.string(),
pa.binary(),
pa.binary(10),
pa.large_string(),
pa.large_binary(),
pa.list_(pa.int32()),
pa.list_(pa.int32(), 2),
pa.large_list(pa.uint16()),
pa.map_(pa.string(), pa.int32()),
pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())]),
pa.struct([pa.field('a', pa.int32(), nullable=False),
pa.field('b', pa.int8(), nullable=False),
pa.field('c', pa.string())]),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE,
type_codes=[4, 8]),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
pa.union([pa.field('a', pa.binary(10), nullable=False),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
pa.dictionary(pa.int32(), pa.string())
)
def test_is_boolean():
assert types.is_boolean(pa.bool_())
assert not types.is_boolean(pa.int8())
def test_is_integer():
signed_ints = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
unsigned_ints = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
for t in signed_ints + unsigned_ints:
assert types.is_integer(t)
for t in signed_ints:
assert types.is_signed_integer(t)
assert not types.is_unsigned_integer(t)
for t in unsigned_ints:
assert types.is_unsigned_integer(t)
assert not types.is_signed_integer(t)
assert not types.is_integer(pa.float32())
assert not types.is_signed_integer(pa.float32())
def test_is_floating():
for t in [pa.float16(), pa.float32(), pa.float64()]:
assert types.is_floating(t)
assert not types.is_floating(pa.int32())
def test_is_null():
assert types.is_null(pa.null())
assert not types.is_null(pa.list_(pa.int32()))
def test_null_field_may_not_be_non_nullable():
# ARROW-7273
with pytest.raises(ValueError):
pa.field('f0', pa.null(), nullable=False)
def test_is_decimal():
decimal128 = pa.decimal128(19, 4)
decimal256 = pa.decimal256(76, 38)
int32 = pa.int32()
assert types.is_decimal(decimal128)
assert types.is_decimal(decimal256)
assert not types.is_decimal(int32)
assert types.is_decimal128(decimal128)
assert not types.is_decimal128(decimal256)
assert not types.is_decimal128(int32)
assert not types.is_decimal256(decimal128)
assert types.is_decimal256(decimal256)
assert not types.is_decimal256(int32)
def test_is_list():
a = pa.list_(pa.int32())
b = pa.large_list(pa.int32())
c = pa.list_(pa.int32(), 3)
assert types.is_list(a)
assert not types.is_large_list(a)
assert not types.is_fixed_size_list(a)
assert types.is_large_list(b)
assert not types.is_list(b)
assert not types.is_fixed_size_list(b)
assert types.is_fixed_size_list(c)
assert not types.is_list(c)
assert not types.is_large_list(c)
assert not types.is_list(pa.int32())
def test_is_map():
m = pa.map_(pa.utf8(), pa.int32())
assert types.is_map(m)
assert not types.is_map(pa.int32())
entries_type = pa.struct([pa.field('key', pa.int8()),
pa.field('value', pa.int8())])
list_type = pa.list_(entries_type)
assert not types.is_map(list_type)
def test_is_dictionary():
assert types.is_dictionary(pa.dictionary(pa.int32(), pa.string()))
assert not types.is_dictionary(pa.int32())
def test_is_nested_or_struct():
struct_ex = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())])
assert types.is_struct(struct_ex)
assert not types.is_struct(pa.list_(pa.int32()))
assert types.is_nested(struct_ex)
assert types.is_nested(pa.list_(pa.int32()))
assert types.is_nested(pa.large_list(pa.int32()))
assert not types.is_nested(pa.int32())
def test_is_union():
for mode in [pa.lib.UnionMode_SPARSE, pa.lib.UnionMode_DENSE]:
assert types.is_union(pa.union([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())],
mode=mode))
assert not types.is_union(pa.list_(pa.int32()))
# TODO(wesm): is_map, once implemented
def test_is_binary_string():
assert types.is_binary(pa.binary())
assert not types.is_binary(pa.string())
assert not types.is_binary(pa.large_binary())
assert not types.is_binary(pa.large_string())
assert types.is_string(pa.string())
assert types.is_unicode(pa.string())
assert not types.is_string(pa.binary())
assert not types.is_string(pa.large_string())
assert not types.is_string(pa.large_binary())
assert types.is_large_binary(pa.large_binary())
assert not types.is_large_binary(pa.large_string())
assert not types.is_large_binary(pa.binary())
assert not types.is_large_binary(pa.string())
assert types.is_large_string(pa.large_string())
assert not types.is_large_string(pa.large_binary())
assert not types.is_large_string(pa.string())
assert not types.is_large_string(pa.binary())
assert types.is_fixed_size_binary(pa.binary(5))
assert not types.is_fixed_size_binary(pa.binary())
def test_is_temporal_date_time_timestamp():
date_types = [pa.date32(), pa.date64()]
time_types = [pa.time32('s'), pa.time64('ns')]
timestamp_types = [pa.timestamp('ms')]
duration_types = [pa.duration('ms')]
for case in date_types + time_types + timestamp_types + duration_types:
assert types.is_temporal(case)
for case in date_types:
assert types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
assert not types.is_duration(case)
for case in time_types:
assert types.is_time(case)
assert not types.is_date(case)
assert not types.is_timestamp(case)
assert not types.is_duration(case)
for case in timestamp_types:
assert types.is_timestamp(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_duration(case)
for case in duration_types:
assert types.is_duration(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
assert not types.is_temporal(pa.int32())
def test_is_primitive():
assert types.is_primitive(pa.int32())
assert not types.is_primitive(pa.list_(pa.int32()))
@pytest.mark.parametrize(('tz', 'expected'), [
(pytz.utc, 'UTC'),
(pytz.timezone('Europe/Paris'), 'Europe/Paris'),
# StaticTzInfo.tzname returns with '-09' so we need to infer the timezone's
# name from the tzinfo.zone attribute
(pytz.timezone('Etc/GMT-9'), 'Etc/GMT-9'),
(pytz.FixedOffset(180), '+03:00'),
(datetime.timezone.utc, 'UTC' if sys.version_info >= (3, 6) else '+00:00'),
(datetime.timezone(datetime.timedelta(hours=1, minutes=30)), '+01:30')
])
def test_tzinfo_to_string(tz, expected):
assert pa.lib.tzinfo_to_string(tz) == expected
def test_tzinfo_to_string_errors():
msg = "Not an instance of datetime.tzinfo"
with pytest.raises(TypeError):
pa.lib.tzinfo_to_string("Europe/Budapest")
if sys.version_info >= (3, 8):
# before 3.8 it was only possible to create timezone objects with whole
# number of minutes
tz = datetime.timezone(datetime.timedelta(hours=1, seconds=30))
msg = "Offset must represent whole number of minutes"
with pytest.raises(ValueError, match=msg):
pa.lib.tzinfo_to_string(tz)
@h.given(tzst.timezones())
def test_pytz_timezone_roundtrip(tz):
timezone_string = pa.lib.tzinfo_to_string(tz)
timezone_tzinfo = pa.lib.string_to_tzinfo(timezone_string)
assert timezone_tzinfo == tz
def test_convert_custom_tzinfo_objects_to_string():
class CorrectTimezone1(datetime.tzinfo):
"""
Conversion is using utcoffset()
"""
def tzname(self, dt):
return None
def utcoffset(self, dt):
return datetime.timedelta(hours=-3, minutes=30)
class CorrectTimezone2(datetime.tzinfo):
"""
Conversion is using tzname()
"""
def tzname(self, dt):
return "+03:00"
def utcoffset(self, dt):
return datetime.timedelta(hours=3)
class BuggyTimezone1(datetime.tzinfo):
"""
Unable to infer name or offset
"""
def tzname(self, dt):
return None
def utcoffset(self, dt):
return None
class BuggyTimezone2(datetime.tzinfo):
"""
Wrong offset type
"""
def tzname(self, dt):
return None
def utcoffset(self, dt):
return "one hour"
class BuggyTimezone3(datetime.tzinfo):
"""
Wrong timezone name type
"""
def tzname(self, dt):
return 240
def utcoffset(self, dt):
return None
assert pa.lib.tzinfo_to_string(CorrectTimezone1()) == "-02:30"
assert pa.lib.tzinfo_to_string(CorrectTimezone2()) == "+03:00"
msg = (r"Object returned by tzinfo.utcoffset\(None\) is not an instance "
r"of datetime.timedelta")
for wrong in [BuggyTimezone1(), BuggyTimezone2(), BuggyTimezone3()]:
with pytest.raises(ValueError, match=msg):
pa.lib.tzinfo_to_string(wrong)
@pytest.mark.parametrize(('string', 'expected'), [
('UTC', pytz.utc),
('Europe/Paris', pytz.timezone('Europe/Paris')),
('+03:00', pytz.FixedOffset(180)),
('+01:30', pytz.FixedOffset(90)),
('-02:00', pytz.FixedOffset(-120))
])
def test_string_to_tzinfo(string, expected):
result = pa.lib.string_to_tzinfo(string)
assert result == expected
@pytest.mark.parametrize('tz,name', [
(pytz.FixedOffset(90), '+01:30'),
(pytz.FixedOffset(-90), '-01:30'),
(pytz.utc, 'UTC'),
(pytz.timezone('America/New_York'), 'America/New_York')
])
def test_timezone_string_roundtrip(tz, name):
assert pa.lib.tzinfo_to_string(tz) == name
assert pa.lib.string_to_tzinfo(name) == tz
def test_timestamp():
for unit in ('s', 'ms', 'us', 'ns'):
for tz in (None, 'UTC', 'Europe/Paris'):
ty = pa.timestamp(unit, tz=tz)
assert ty.unit == unit
assert ty.tz == tz
for invalid_unit in ('m', 'arbit', 'rary'):
with pytest.raises(ValueError, match='Invalid TimeUnit string'):
pa.timestamp(invalid_unit)
def test_time32_units():
for valid_unit in ('s', 'ms'):
ty = pa.time32(valid_unit)
assert ty.unit == valid_unit
for invalid_unit in ('m', 'us', 'ns'):
error_msg = 'Invalid TimeUnit for time32: {}'.format(invalid_unit)
with pytest.raises(ValueError, match=error_msg):
pa.time32(invalid_unit)
def test_time64_units():
for valid_unit in ('us', 'ns'):
ty = pa.time64(valid_unit)
assert ty.unit == valid_unit
for invalid_unit in ('m', 's', 'ms'):
error_msg = 'Invalid TimeUnit for time64: {}'.format(invalid_unit)
with pytest.raises(ValueError, match=error_msg):
pa.time64(invalid_unit)
def test_duration():
for unit in ('s', 'ms', 'us', 'ns'):
ty = pa.duration(unit)
assert ty.unit == unit
for invalid_unit in ('m', 'arbit', 'rary'):
with pytest.raises(ValueError, match='Invalid TimeUnit string'):
pa.duration(invalid_unit)
def test_list_type():
ty = pa.list_(pa.int64())
assert isinstance(ty, pa.ListType)
assert ty.value_type == pa.int64()
with pytest.raises(TypeError):
pa.list_(None)
def test_large_list_type():
ty = pa.large_list(pa.utf8())
assert isinstance(ty, pa.LargeListType)
assert ty.value_type == pa.utf8()
with pytest.raises(TypeError):
pa.large_list(None)
def test_map_type():
ty = pa.map_(pa.utf8(), pa.int32())
assert isinstance(ty, pa.MapType)
assert ty.key_type == pa.utf8()
assert ty.item_type == pa.int32()
with pytest.raises(TypeError):
pa.map_(None)
with pytest.raises(TypeError):
pa.map_(pa.int32(), None)
def test_fixed_size_list_type():
ty = pa.list_(pa.float64(), 2)
assert isinstance(ty, pa.FixedSizeListType)
assert ty.value_type == pa.float64()
assert ty.list_size == 2
with pytest.raises(ValueError):
pa.list_(pa.float64(), -2)
def test_struct_type():
fields = [
# Duplicate field name on purpose
pa.field('a', pa.int64()),
pa.field('a', pa.int32()),
pa.field('b', pa.int32())
]
ty = pa.struct(fields)
assert len(ty) == ty.num_fields == 3
assert list(ty) == fields
assert ty[0].name == 'a'
assert ty[2].type == pa.int32()
with pytest.raises(IndexError):
assert ty[3]
assert ty['b'] == ty[2]
# Not found
with pytest.raises(KeyError):
ty['c']
# Neither integer nor string
with pytest.raises(TypeError):
ty[None]
for a, b in zip(ty, fields):
        assert a == b
# Construct from list of tuples
ty = pa.struct([('a', pa.int64()),
('a', pa.int32()),
('b', pa.int32())])
assert list(ty) == fields
for a, b in zip(ty, fields):
        assert a == b
# Construct from mapping
fields = [pa.field('a', pa.int64()),
pa.field('b', pa.int32())]
ty = pa.struct(OrderedDict([('a', pa.int64()),
('b', pa.int32())]))
assert list(ty) == fields
for a, b in zip(ty, fields):
        assert a == b
# Invalid args
with pytest.raises(TypeError):
pa.struct([('a', None)])
def test_struct_duplicate_field_names():
fields = [
pa.field('a', pa.int64()),
pa.field('b', pa.int32()),
pa.field('a', pa.int32())
]
ty = pa.struct(fields)
# Duplicate
with pytest.warns(UserWarning):
with pytest.raises(KeyError):
ty['a']
# StructType::GetFieldIndex
assert ty.get_field_index('a') == -1
# StructType::GetAllFieldIndices
assert ty.get_all_field_indices('a') == [0, 2]
def test_union_type():
def check_fields(ty, fields):
assert ty.num_fields == len(fields)
assert [ty[i] for i in range(ty.num_fields)] == fields
fields = [pa.field('x', pa.list_(pa.int32())),
pa.field('y', pa.binary())]
type_codes = [5, 9]
for mode in ('sparse', pa.lib.UnionMode_SPARSE):
ty = pa.union(fields, mode=mode)
assert ty.mode == 'sparse'
check_fields(ty, fields)
assert ty.type_codes == [0, 1]
ty = pa.union(fields, mode=mode, type_codes=type_codes)
assert ty.mode == 'sparse'
check_fields(ty, fields)
assert ty.type_codes == type_codes
# Invalid number of type codes
with pytest.raises(ValueError):
pa.union(fields, mode=mode, type_codes=type_codes[1:])
for mode in ('dense', pa.lib.UnionMode_DENSE):
ty = pa.union(fields, mode=mode)
assert ty.mode == 'dense'
check_fields(ty, fields)
assert ty.type_codes == [0, 1]
ty = pa.union(fields, mode=mode, type_codes=type_codes)
assert ty.mode == 'dense'
check_fields(ty, fields)
assert ty.type_codes == type_codes
# Invalid number of type codes
with pytest.raises(ValueError):
pa.union(fields, mode=mode, type_codes=type_codes[1:])
for mode in ('unknown', 2):
with pytest.raises(ValueError, match='Invalid union mode'):
pa.union(fields, mode=mode)
def test_dictionary_type():
ty0 = pa.dictionary(pa.int32(), pa.string())
assert ty0.index_type == pa.int32()
assert ty0.value_type == pa.string()
assert ty0.ordered is False
ty1 = pa.dictionary(pa.int8(), pa.float64(), ordered=True)
assert ty1.index_type == pa.int8()
assert ty1.value_type == pa.float64()
assert ty1.ordered is True
# construct from non-arrow objects
ty2 = pa.dictionary('int8', 'string')
assert ty2.index_type == pa.int8()
assert ty2.value_type == pa.string()
assert ty2.ordered is False
# invalid index type raises
with pytest.raises(TypeError):
pa.dictionary(pa.string(), pa.int64())
with pytest.raises(TypeError):
pa.dictionary(pa.uint32(), pa.string())
def test_dictionary_ordered_equals():
# Python side checking of ARROW-6345
d1 = pa.dictionary('int32', 'binary', ordered=True)
d2 = pa.dictionary('int32', 'binary', ordered=False)
d3 = pa.dictionary('int8', 'binary', ordered=True)
d4 = pa.dictionary('int32', 'binary', ordered=True)
assert not d1.equals(d2)
assert not d1.equals(d3)
assert d1.equals(d4)
def test_types_hashable():
many_types = get_many_types()
in_dict = {}
for i, type_ in enumerate(many_types):
assert hash(type_) == hash(type_)
in_dict[type_] = i
assert len(in_dict) == len(many_types)
for i, type_ in enumerate(many_types):
assert in_dict[type_] == i
def test_types_picklable():
for ty in get_many_types():
data = pickle.dumps(ty)
assert pickle.loads(data) == ty
def test_types_weakref():
for ty in get_many_types():
wr = weakref.ref(ty)
assert wr() is not None
# Note that ty may be a singleton and therefore outlive this loop
wr = weakref.ref(pa.int32())
assert wr() is not None # singleton
wr = weakref.ref(pa.list_(pa.int32()))
assert wr() is None # not a singleton
def test_fields_hashable():
in_dict = {}
fields = [pa.field('a', pa.int32()),
pa.field('a', pa.int64()),
pa.field('a', pa.int64(), nullable=False),
pa.field('b', pa.int32()),
pa.field('b', pa.int32(), nullable=False)]
for i, field in enumerate(fields):
in_dict[field] = i
assert len(in_dict) == len(fields)
for i, field in enumerate(fields):
assert in_dict[field] == i
def test_fields_weakrefable():
field = pa.field('a', pa.int32())
wr = weakref.ref(field)
assert wr() is not None
del field
assert wr() is None
@pytest.mark.parametrize('t,check_func', [
(pa.date32(), types.is_date32),
(pa.date64(), types.is_date64),
(pa.time32('s'), types.is_time32),
(pa.time64('ns'), types.is_time64),
(pa.int8(), types.is_int8),
(pa.int16(), types.is_int16),
(pa.int32(), types.is_int32),
(pa.int64(), types.is_int64),
(pa.uint8(), types.is_uint8),
(pa.uint16(), types.is_uint16),
(pa.uint32(), types.is_uint32),
(pa.uint64(), types.is_uint64),
(pa.float16(), types.is_float16),
(pa.float32(), types.is_float32),
(pa.float64(), types.is_float64)
])
def test_exact_primitive_types(t, check_func):
assert check_func(t)
def test_type_id():
# enum values are not exposed publicly
for ty in get_many_types():
assert isinstance(ty.id, int)
def test_bit_width():
for ty, expected in [(pa.bool_(), 1),
(pa.int8(), 8),
(pa.uint32(), 32),
(pa.float16(), 16),
(pa.decimal128(19, 4), 128),
(pa.decimal256(76, 38), 256),
(pa.binary(42), 42 * 8)]:
assert ty.bit_width == expected
for ty in [pa.binary(), pa.string(), pa.list_(pa.int16())]:
with pytest.raises(ValueError, match="fixed width"):
ty.bit_width
def test_fixed_size_binary_byte_width():
ty = pa.binary(5)
assert ty.byte_width == 5
def test_decimal_properties():
ty = pa.decimal128(19, 4)
assert ty.byte_width == 16
assert ty.precision == 19
assert ty.scale == 4
ty = pa.decimal256(76, 38)
assert ty.byte_width == 32
assert ty.precision == 76
assert ty.scale == 38
def test_decimal_overflow():
pa.decimal128(1, 0)
pa.decimal128(38, 0)
for i in (0, -1, 39):
with pytest.raises(ValueError):
pa.decimal128(i, 0)
pa.decimal256(1, 0)
pa.decimal256(76, 0)
for i in (0, -1, 77):
with pytest.raises(ValueError):
pa.decimal256(i, 0)
def test_type_equality_operators():
many_types = get_many_types()
non_pyarrow = ('foo', 16, {'s', 'e', 't'})
for index, ty in enumerate(many_types):
# could use two parametrization levels,
# but that'd bloat pytest's output
for i, other in enumerate(many_types + non_pyarrow):
if i == index:
assert ty == other
else:
assert ty != other
def test_key_value_metadata():
m = pa.KeyValueMetadata({'a': 'A', 'b': 'B'})
assert len(m) == 2
assert m['a'] == b'A'
assert m[b'a'] == b'A'
assert m['b'] == b'B'
assert 'a' in m
assert b'a' in m
assert 'c' not in m
m1 = pa.KeyValueMetadata({'a': 'A', 'b': 'B'})
m2 = pa.KeyValueMetadata(a='A', b='B')
m3 = pa.KeyValueMetadata([('a', 'A'), ('b', 'B')])
assert m1 != 2
assert m1 == m2
assert m2 == m3
assert m1 == {'a': 'A', 'b': 'B'}
assert m1 != {'a': 'A', 'b': 'C'}
with pytest.raises(TypeError):
pa.KeyValueMetadata({'a': 1})
with pytest.raises(TypeError):
pa.KeyValueMetadata({1: 'a'})
with pytest.raises(TypeError):
pa.KeyValueMetadata(a=1)
expected = [(b'a', b'A'), (b'b', b'B')]
result = [(k, v) for k, v in m3.items()]
assert result == expected
assert list(m3.items()) == expected
assert list(m3.keys()) == [b'a', b'b']
assert list(m3.values()) == [b'A', b'B']
assert len(m3) == 2
# test duplicate key support
md = pa.KeyValueMetadata([
('a', 'alpha'),
('b', 'beta'),
('a', 'Alpha'),
('a', 'ALPHA'),
])
expected = [
(b'a', b'alpha'),
(b'b', b'beta'),
(b'a', b'Alpha'),
(b'a', b'ALPHA')
]
assert len(md) == 4
assert isinstance(md.keys(), Iterator)
assert isinstance(md.values(), Iterator)
assert isinstance(md.items(), Iterator)
assert list(md.items()) == expected
assert list(md.keys()) == [k for k, _ in expected]
assert list(md.values()) == [v for _, v in expected]
# first occurrence
assert md['a'] == b'alpha'
assert md['b'] == b'beta'
assert md.get_all('a') == [b'alpha', b'Alpha', b'ALPHA']
assert md.get_all('b') == [b'beta']
    assert md.get_all('unknown') == []
with pytest.raises(KeyError):
md = pa.KeyValueMetadata([
('a', 'alpha'),
('b', 'beta'),
('a', 'Alpha'),
('a', 'ALPHA'),
], b='BETA')
def test_key_value_metadata_duplicates():
meta = pa.KeyValueMetadata({'a': '1', 'b': '2'})
with pytest.raises(KeyError):
pa.KeyValueMetadata(meta, a='3')
def test_field_basic():
t = pa.string()
f = pa.field('foo', t)
assert f.name == 'foo'
assert f.nullable
assert f.type is t
assert repr(f) == "pyarrow.Field<foo: string>"
f = pa.field('foo', t, False)
assert not f.nullable
with pytest.raises(TypeError):
pa.field('foo', None)
def test_field_equals():
meta1 = {b'foo': b'bar'}
meta2 = {b'bizz': b'bazz'}
f1 = pa.field('a', pa.int8(), nullable=True)
f2 = pa.field('a', pa.int8(), nullable=True)
f3 = pa.field('a', pa.int8(), nullable=False)
f4 = pa.field('a', pa.int16(), nullable=False)
f5 = pa.field('b', pa.int16(), nullable=False)
f6 = pa.field('a', pa.int8(), nullable=True, metadata=meta1)
f7 = pa.field('a', pa.int8(), nullable=True, metadata=meta1)
f8 = pa.field('a', pa.int8(), nullable=True, metadata=meta2)
assert f1.equals(f2)
assert f6.equals(f7)
assert not f1.equals(f3)
assert not f1.equals(f4)
assert not f3.equals(f4)
assert not f4.equals(f5)
# No metadata in f1, but metadata in f6
assert f1.equals(f6)
assert not f1.equals(f6, check_metadata=True)
# Different metadata
assert f6.equals(f7)
assert f7.equals(f8)
assert not f7.equals(f8, check_metadata=True)
def test_field_equality_operators():
f1 = pa.field('a', pa.int8(), nullable=True)
f2 = pa.field('a', pa.int8(), nullable=True)
f3 = pa.field('b', pa.int8(), nullable=True)
f4 = pa.field('b', pa.int8(), nullable=False)
assert f1 == f2
assert f1 != f3
assert f3 != f4
assert f1 != 'foo'
def test_field_metadata():
f1 = pa.field('a', pa.int8())
f2 = pa.field('a', pa.int8(), metadata={})
f3 = pa.field('a', pa.int8(), metadata={b'bizz': b'bazz'})
assert f1.metadata is None
assert f2.metadata == {}
assert f3.metadata[b'bizz'] == b'bazz'
def test_field_add_remove_metadata():
import collections
f0 = pa.field('foo', pa.int32())
assert f0.metadata is None
metadata = {b'foo': b'bar', b'pandas': b'badger'}
metadata2 = collections.OrderedDict([
(b'a', b'alpha'),
(b'b', b'beta')
])
f1 = f0.with_metadata(metadata)
assert f1.metadata == metadata
f2 = f0.with_metadata(metadata2)
assert f2.metadata == metadata2
with pytest.raises(TypeError):
f0.with_metadata([1, 2, 3])
f3 = f1.remove_metadata()
assert f3.metadata is None
# idempotent
f4 = f3.remove_metadata()
assert f4.metadata is None
f5 = pa.field('foo', pa.int32(), True, metadata)
f6 = f0.with_metadata(metadata)
assert f5.equals(f6)
def test_field_modified_copies():
f0 = pa.field('foo', pa.int32(), True)
f0_ = pa.field('foo', pa.int32(), True)
assert f0.equals(f0_)
f1 = pa.field('foo', pa.int64(), True)
f1_ = f0.with_type(pa.int64())
assert f1.equals(f1_)
# Original instance is unmodified
assert f0.equals(f0_)
f2 = pa.field('foo', pa.int32(), False)
f2_ = f0.with_nullable(False)
assert f2.equals(f2_)
# Original instance is unmodified
assert f0.equals(f0_)
f3 = pa.field('bar', pa.int32(), True)
f3_ = f0.with_name('bar')
assert f3.equals(f3_)
# Original instance is unmodified
assert f0.equals(f0_)
def test_is_integer_value():
assert pa.types.is_integer_value(1)
assert pa.types.is_integer_value(np.int64(1))
assert not pa.types.is_integer_value('1')
def test_is_float_value():
assert not pa.types.is_float_value(1)
assert pa.types.is_float_value(1.)
assert pa.types.is_float_value(np.float64(1))
assert not pa.types.is_float_value('1.0')
def test_is_boolean_value():
assert not pa.types.is_boolean_value(1)
assert pa.types.is_boolean_value(True)
assert pa.types.is_boolean_value(False)
assert pa.types.is_boolean_value(np.bool_(True))
assert pa.types.is_boolean_value(np.bool_(False))
@h.given(
past.all_types |
past.all_fields |
past.all_schemas
)
@h.example(
pa.field(name='', type=pa.null(), metadata={'0': '', '': ''})
)
def test_pickling(field):
data = pickle.dumps(field)
assert pickle.loads(data) == field
@h.given(
st.lists(past.all_types) |
st.lists(past.all_fields) |
st.lists(past.all_schemas)
)
def test_hashing(items):
h.assume(
# well, this is still O(n^2), but makes the input unique
all(not a.equals(b) for i, a in enumerate(items) for b in items[:i])
)
container = {}
for i, item in enumerate(items):
assert hash(item) == hash(item)
container[item] = i
assert len(container) == len(items)
for i, item in enumerate(items):
assert container[item] == i
| apache-2.0 |