repo_name (string, 6-92 chars) | path (string, 4-191 chars) | copies (322 distinct values) | size (string, 4-6 chars) | content (string, 821-753k chars) | license (15 distinct values)
---|---|---|---|---|---|
nomadcube/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof-of-concept application of Non-Negative Matrix
Factorization of the term-frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a few tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial.
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
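# Hedged extra sketch (not part of the original example): the docstring notes
# that topic weights are not shown; nmf.components_ also holds the weight of
# every term in every topic, so the top terms of a topic can be printed
# together with their weights, e.g. for topic 0.
print("Top terms and weights for topic #0:")
topic0 = nmf.components_[0]
print(" ".join(["%s (%.3f)" % (feature_names[i], topic0[i])
                for i in topic0.argsort()[:-n_top_words - 1:-1]]))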
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 70 | 7486 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is chosen too large, the Lasso is equivalent to stepwise
regression, and thus brings no advantage over a univariate F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the curve (AUC) of the
precision-recall curve.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
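# Hedged sanity-check sketch (not part of the original example): for an i.i.d.
# Gaussian design, the mutual incoherence of a small block of "relevant"
# columns against the remaining columns is typically well below 1, which is
# the favourable regime described in the docstring above. The names below
# (_rng, _X_rel, _X_irr) are illustrative only.
_rng = np.random.RandomState(0)
_X_rel = _rng.normal(size=(100, 3))
_X_irr = _rng.normal(size=(100, 20))
print("mutual incoherence of an i.i.d. Gaussian design: %.3f"
      % mutual_incoherence(_X_rel, _X_irr))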
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocks of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and makes it
# easier to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Silence the user warnings - they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the first 100 coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
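# Hedged standalone sketch (not part of the original example) of the summary
# statistic used above: each selection method assigns one score per feature,
# and the ranking is summarized by the area under the precision-recall curve
# computed against the known support (coef != 0). The toy arrays below are
# illustrative only.
_toy_support = np.array([True, True, False, False, False])
_toy_scores = np.array([0.9, 0.7, 0.4, 0.2, 0.1])
_prec, _rec, _ = precision_recall_curve(_toy_support, _toy_scores)
print("toy precision-recall AUC: %.3f" % auc(_rec, _prec))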
| bsd-3-clause |
jjx02230808/project0223 | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
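# Hedged follow-up sketch (not part of the original example): a Gaussian
# mixture with full covariances does not assume spherical clusters, so it can
# recover the anisotropic blobs that k-means mis-clusters above. This assumes
# a scikit-learn version that provides sklearn.mixture.GaussianMixture; older
# releases expose sklearn.mixture.GMM instead.
from sklearn.mixture import GaussianMixture
gmm = GaussianMixture(n_components=3, covariance_type='full',
                      random_state=random_state)
gmm.fit(X_aniso)
y_gmm = gmm.predict(X_aniso)
plt.figure()
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_gmm)
plt.title("Anisotropically Distributed Blobs (Gaussian mixture sketch)")
plt.show()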
| bsd-3-clause |
fclesio/learning-space | Lightning Talk @Movile - ML with Scikit-Learn/Recipes/MeanShift.py | 1 | 1580 | # Mean Shift: http://scikit-learn.org/stable/auto_examples/cluster/plot_mean_shift.html#example-cluster-plot-mean-shift-py
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically estimated using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
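# Hedged usage sketch (not part of the original recipe): a fitted MeanShift
# model can assign new points to the nearest learned cluster center via its
# predict method. The coordinates below are illustrative only.
new_points = np.array([[1.0, 1.0], [-1.2, -0.8], [0.9, -1.1]])
print("predicted clusters for new points: %s" % ms.predict(new_points))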
| gpl-2.0 |
vigilv/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
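# Hedged follow-up sketch (not part of the original example): the staged test
# errors collected above can also be used to report the boosting iteration at
# which each algorithm reaches its lowest test error.
best_discrete = discrete_test_errors.index(min(discrete_test_errors)) + 1
best_real = real_test_errors.index(min(real_test_errors)) + 1
print("SAMME: lowest test error %.3f after %d trees"
      % (min(discrete_test_errors), best_discrete))
print("SAMME.R: lowest test error %.3f after %d trees"
      % (min(real_test_errors), best_real))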
| bsd-3-clause |
isb-cgc/ISB-CGC-data-proc | tcga_etl_pipeline/clin_bio/metadata/parse_clinical_metadata.py | 1 | 2284 | import os
import sys
import re
import hashlib
import json
from cStringIO import StringIO
import pandas as pd
import logging
from HTMLParser import HTMLParser
import datetime
import os.path
from google.cloud import storage
from lxml import etree
from collections import Counter
#--------------------------------------
# set default bucket
#--------------------------------------
storage.set_default_bucket("isb-cgc")
storage_conn = storage.get_connection()
storage.set_default_connection(storage_conn)
all_elements = {}
#--------------------------------------
# get the bucket contents
#--------------------------------------
bucket = storage.get_bucket('isb-cgc-open')
for k in bucket.list_blobs(prefix="tcga/"):
if '.xml' in k.name and 'clinical' in k.name:
print k.name
disease_type = k.name.split("/")[1]
maf_data = StringIO()
k.download_to_file(maf_data)
maf_data.seek(0)
tree = etree.parse(maf_data)
root = tree.getroot() #this is the root; we can use it to find elements
blank_elements = re.compile("^\\n\s*$")
admin_element = root.findall('.//*/[@procurement_status="Completed"]', namespaces=root.nsmap)
maf_data.close()
# ------------------------------------
unique_elements = {}
for i in admin_element:
unique_elements[i.tag.split("}")[1]] = 1
for j in unique_elements:
if disease_type in all_elements:
all_elements[disease_type].append(j)
else:
all_elements[disease_type] = []
all_elements[disease_type].append(j)
for dt in all_elements:
c = dict(Counter(all_elements[dt]))
df = pd.DataFrame(c.items(), columns=["item", "counts"])
df = df.sort(['counts'], ascending=[False])
df_stringIO = df.to_csv(sep='\t', index=False)
# upload the file
upload_bucket = storage.get_bucket('ptone-experiments')
upload_blob = storage.blob.Blob('working-files/clinical_metadata/' + dt + ".counts.txt", bucket=upload_bucket)
upload_blob.upload_from_string(df_stringIO)
#for dt in all_elements:
# c = dict(Counter(all_elements[dt]))
# df = pd.DataFrame(c.items(), columns=["item", "counts"])
# for ele in df[df.counts >= round(int(df.counts.quantile(.70)))]['item']:
# print ele
#
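# Hedged alternative sketch of the counting/export step above, using the
# non-deprecated pandas API (DataFrame.sort_values replaced DataFrame.sort in
# newer pandas releases). It only rebuilds the per-disease-type count table
# locally and does not re-upload anything.
for dt in all_elements:
    c = dict(Counter(all_elements[dt]))
    df = pd.DataFrame(list(c.items()), columns=["item", "counts"])
    df = df.sort_values("counts", ascending=False)
    print df.head()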
| apache-2.0 |
simpeg/simpegpf | simpegPF/MagAnalytics.py | 1 | 9270 | from scipy.constants import mu_0
from SimPEG import *
from SimPEG.Utils import kron3, speye, sdiag
import matplotlib.pyplot as plt
def spheremodel(mesh, x0, y0, z0, r):
"""
Generate model indices for a sphere
- (x0, y0, z0): the center location of the sphere
- r: the radius of the sphere
- returns logical indices of the cell-center model
"""
ind = np.sqrt( (mesh.gridCC[:,0]-x0)**2+(mesh.gridCC[:,1]-y0)**2+(mesh.gridCC[:,2]-z0)**2 ) < r
return ind
def MagSphereAnaFun(x, y, z, R, x0, y0, z0, mu1, mu2, H0, flag='total'):
"""
Analytic function for the magnetics problem. The setup here is a
magnetic sphere in a whole-space, assuming that the inducing field is oriented in the x-direction.
* (x0, y0, z0): the center location of the sphere
* R: the radius of the sphere
.. math::
\mathbf{H}_0 = H_0\hat{x}
"""
if not (np.size(x) == np.size(y) == np.size(z)):
print "Specify same size of x, y, z"
return
dim = x.shape
x = Utils.mkvc(x)
y = Utils.mkvc(y)
z = Utils.mkvc(z)
ind = np.sqrt((x-x0)**2+(y-y0)**2+(z-z0)**2 ) < R
r = Utils.mkvc(np.sqrt((x-x0)**2+(y-y0)**2+(z-z0)**2 ))
Bx = np.zeros(x.size)
By = np.zeros(x.size)
Bz = np.zeros(x.size)
# Inside of the sphere
rf2 = 3*mu1/(mu2+2*mu1)
if flag == 'total' and any(ind):
Bx[ind] = mu2*H0*(rf2)
elif (flag == 'secondary'):
Bx[ind] = mu2*H0*(rf2)-mu1*H0
By[ind] = 0.
Bz[ind] = 0.
# Outside of the sphere
rf1 = (mu2-mu1)/(mu2+2*mu1)
if (flag == 'total'):
Bx[~ind] = mu1*(H0+H0/r[~ind]**5*(R**3)*rf1*(2*(x[~ind]-x0)**2-(y[~ind]-y0)**2-(z[~ind]-z0)**2))
elif (flag == 'secondary'):
Bx[~ind] = mu1*(H0/r[~ind]**5*(R**3)*rf1*(2*(x[~ind]-x0)**2-(y[~ind]-y0)**2-(z[~ind]-z0)**2))
By[~ind] = mu1*(H0/r[~ind]**5*(R**3)*rf1*(3*(x[~ind]-x0)*(y[~ind]-y0)))
Bz[~ind] = mu1*(H0/r[~ind]**5*(R**3)*rf1*(3*(x[~ind]-x0)*(z[~ind]-z0)))
return np.reshape(Bx, x.shape, order='F'), np.reshape(By, x.shape, order='F'), np.reshape(Bz, x.shape, order='F')
def CongruousMagBC(mesh, Bo, chi):
"""
Computing the boundary condition using the congruous sphere method.
This is designed for secondary field formulation.
>> Input
* mesh: Mesh class
* Bo: np.array([Box, Boy, Boz]): Primary magnetic flux
* chi: susceptibility at cell volume
.. math::
\\vec{B}(r) = \\frac{\mu_0}{4\pi} \\frac{m}{ \| \\vec{r} - \\vec{r}_0\|^3}[3\hat{m}\cdot\hat{r}-\hat{m}]
"""
ind = chi > 0.
V = mesh.vol[ind].sum()
gamma = 1/V*(chi*mesh.vol).sum() # like a mass!
Bot = np.sqrt(sum(Bo**2))
mx = Bo[0]/Bot
my = Bo[1]/Bot
mz = Bo[2]/Bot
mom = 1/mu_0*Bot*gamma*V/(1+gamma/3)
xc = sum(chi[ind]*mesh.gridCC[:,0][ind])/sum(chi[ind])
yc = sum(chi[ind]*mesh.gridCC[:,1][ind])/sum(chi[ind])
zc = sum(chi[ind]*mesh.gridCC[:,2][ind])/sum(chi[ind])
indxd, indxu, indyd, indyu, indzd, indzu = mesh.faceBoundaryInd
const = mu_0/(4*np.pi)*mom
rfun = lambda x: np.sqrt((x[:,0]-xc)**2 + (x[:,1]-yc)**2 + (x[:,2]-zc)**2)
mdotrx = (mx*(mesh.gridFx[(indxd|indxu),0]-xc)/rfun(mesh.gridFx[(indxd|indxu),:]) +
my*(mesh.gridFx[(indxd|indxu),1]-yc)/rfun(mesh.gridFx[(indxd|indxu),:]) +
mz*(mesh.gridFx[(indxd|indxu),2]-zc)/rfun(mesh.gridFx[(indxd|indxu),:]))
Bbcx = const/(rfun(mesh.gridFx[(indxd|indxu),:])**3)*(3*mdotrx*(mesh.gridFx[(indxd|indxu),0]-xc)/rfun(mesh.gridFx[(indxd|indxu),:])-mx)
mdotry = (mx*(mesh.gridFy[(indyd|indyu),0]-xc)/rfun(mesh.gridFy[(indyd|indyu),:]) +
my*(mesh.gridFy[(indyd|indyu),1]-yc)/rfun(mesh.gridFy[(indyd|indyu),:]) +
mz*(mesh.gridFy[(indyd|indyu),2]-zc)/rfun(mesh.gridFy[(indyd|indyu),:]))
Bbcy = const/(rfun(mesh.gridFy[(indyd|indyu),:])**3)*(3*mdotry*(mesh.gridFy[(indyd|indyu),1]-yc)/rfun(mesh.gridFy[(indyd|indyu),:])-my)
mdotrz = (mx*(mesh.gridFz[(indzd|indzu),0]-xc)/rfun(mesh.gridFz[(indzd|indzu),:]) +
my*(mesh.gridFz[(indzd|indzu),1]-yc)/rfun(mesh.gridFz[(indzd|indzu),:]) +
mz*(mesh.gridFz[(indzd|indzu),2]-zc)/rfun(mesh.gridFz[(indzd|indzu),:]))
Bbcz = const/(rfun(mesh.gridFz[(indzd|indzu),:])**3)*(3*mdotrz*(mesh.gridFz[(indzd|indzu),2]-zc)/rfun(mesh.gridFz[(indzd|indzu),:])-mz)
return np.r_[Bbcx, Bbcy, Bbcz], (1/gamma-1/(3+gamma))*1/V
def MagSphereAnaFunA(x, y, z, R, xc, yc, zc, chi, Bo, flag):
"""
Computing the boundary condition using the congruous sphere method.
This is designed for secondary field formulation.
>> Input
mesh: Mesh class
Bo: np.array([Box, Boy, Boz]): Primary magnetic flux
Chi: susceptibility at cell volume
.. math::
\\vec{B}(r) = \\frac{\mu_0}{4\pi}\\frac{m}{\| \\vec{r}-\\vec{r}_0\|^3}[3\hat{m}\cdot\hat{r}-\hat{m}]
"""
if not (np.size(x) == np.size(y) == np.size(z)):
print "Specify same size of x, y, z"
return
dim = x.shape
x = Utils.mkvc(x)
y = Utils.mkvc(y)
z = Utils.mkvc(z)
Bot = np.sqrt(sum(Bo**2))
mx = Bo[0]/Bot
my = Bo[1]/Bot
mz = Bo[2]/Bot
ind = np.sqrt((x-xc)**2+(y-yc)**2+(z-zc)**2 ) < R
Bx = np.zeros(x.size)
By = np.zeros(x.size)
Bz = np.zeros(x.size)
# Inside of the sphere
rf2 = 3/(chi+3)*(1+chi)
if (flag == 'total'):
Bx[ind] = Bo[0]*(rf2)
By[ind] = Bo[1]*(rf2)
Bz[ind] = Bo[2]*(rf2)
elif (flag == 'secondary'):
Bx[ind] = Bo[0]*(rf2)-Bo[0]
By[ind] = Bo[1]*(rf2)-Bo[1]
Bz[ind] = Bo[2]*(rf2)-Bo[2]
r = Utils.mkvc(np.sqrt((x-xc)**2+(y-yc)**2+(z-zc)**2 ))
V = 4*np.pi*R**3/3
mom = Bot/mu_0*chi/(1+chi/3)*V
const = mu_0/(4*np.pi)*mom
mdotr = (mx*(x[~ind]-xc)/r[~ind] + my*(y[~ind]-yc)/r[~ind] + mz*(z[~ind]-zc)/r[~ind])
Bx[~ind] = const/(r[~ind]**3)*(3*mdotr*(x[~ind]-xc)/r[~ind]-mx)
By[~ind] = const/(r[~ind]**3)*(3*mdotr*(y[~ind]-yc)/r[~ind]-my)
Bz[~ind] = const/(r[~ind]**3)*(3*mdotr*(z[~ind]-zc)/r[~ind]-mz)
return Bx, By, Bz
def IDTtoxyz(Inc, Dec, Btot):
"""
Convert from Inclination, Declination, Total intensity of earth field to x, y, z
"""
Bx = Btot*np.cos(Inc/180.*np.pi)*np.sin(Dec/180.*np.pi)
By = Btot*np.cos(Inc/180.*np.pi)*np.cos(Dec/180.*np.pi)
Bz = -Btot*np.sin(Inc/180.*np.pi)
return np.r_[Bx, By, Bz]
def MagSphereFreeSpace(x, y, z, R, xc, yc, zc, chi, Bo):
"""
Computing the boundary condition using the congruous sphere method.
This is designed for secondary field formulation.
>> Input
mesh: Mesh class
Bo: np.array([Box, Boy, Boz]): Primary magnetic flux
Chi: susceptibility at cell volume
.. math::
\\vec{B}(r) = \\frac{\mu_0}{4\pi}\\frac{m}{\| \\vec{r}-\\vec{r}_0\|^3}[3\hat{m}\cdot\hat{r}-\hat{m}]
"""
if not (np.size(x) == np.size(y) == np.size(z)):
print "Specify same size of x, y, z"
return
x = Utils.mkvc(x)
y = Utils.mkvc(y)
z = Utils.mkvc(z)
nobs = len(x)
Bot = np.sqrt(sum(Bo**2))
mx = np.ones([nobs]) * Bo[0,0] * R**3 / 3. * chi
my = np.ones([nobs]) * Bo[0,1] * R**3 / 3. * chi
mz = np.ones([nobs]) * Bo[0,2] * R**3 / 3. * chi
M = np.c_[mx, my, mz]
rx = (x - xc)
ry = (y - yc)
rz = (zc - z)
rvec = np.c_[rx, ry, rz]
r = np.sqrt((rx)**2+(ry)**2+(rz)**2 )
B = -Utils.sdiag(1./r**3)*M + Utils.sdiag((3 * np.sum(M*rvec,axis=1))/r**5)*rvec
Bx = B[:,0]
By = B[:,1]
Bz = B[:,2]
return Bx, By, Bz
if __name__ == '__main__':
hxind = [(0,25,1.3),(21, 12.5),(0,25,1.3)]
hyind = [(0,25,1.3),(21, 12.5),(0,25,1.3)]
hzind = [(0,25,1.3),(20, 12.5),(0,25,1.3)]
# hx, hy, hz = Utils.meshTensors(hxind, hyind, hzind)
M3 = Mesh.TensorMesh([hxind, hyind, hzind], "CCC")
indxd, indxu, indyd, indyu, indzd, indzu = M3.faceBoundaryInd
mu0 = 4*np.pi*1e-7
chibkg = 0.
chiblk = 0.01
chi = np.ones(M3.nC)*chibkg
sph_ind = spheremodel(M3, 0, 0, 0, 100)
chi[sph_ind] = chiblk
mu = (1.+chi)*mu0
Bbc, const = CongruousMagBC(M3, np.array([1., 0., 0.]), chi)
flag = 'secondary'
Box = 1.
H0 = Box/mu_0
Bbcxx, Bbcxy, Bbcxz = MagSphereAnaFun(M3.gridFx[(indxd|indxu),0], M3.gridFx[(indxd|indxu),1], M3.gridFx[(indxd|indxu),2], 100, 0., 0., 0., mu_0, mu_0*(1+chiblk), H0, flag)
Bbcyx, Bbcyy, Bbcyz = MagSphereAnaFun(M3.gridFy[(indyd|indyu),0], M3.gridFy[(indyd|indyu),1], M3.gridFy[(indyd|indyu),2], 100, 0., 0., 0., mu_0, mu_0*(1+chiblk), H0, flag)
Bbczx, Bbczy, Bbczz = MagSphereAnaFun(M3.gridFz[(indzd|indzu),0], M3.gridFz[(indzd|indzu),1], M3.gridFz[(indzd|indzu),2], 100, 0., 0., 0., mu_0, mu_0*(1+chiblk), H0, flag)
Bbc_ana = np.r_[Bbcxx, Bbcyy, Bbczz]
# fig, ax = plt.subplots(1,1, figsize = (10, 10))
# ax.plot(Bbc_ana)
# ax.plot(Bbc)
# plt.show()
err = np.linalg.norm(Bbc-Bbc_ana)/np.linalg.norm(Bbc_ana)
if err < 0.1:
print 'Mag Boundary computation is valid, err = ', err
else:
print 'Mag Boundary computation is wrong!!, err = ', err
pass
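# Hedged extra demo (illustrative values only, not part of the original
# check): build an inducing field from inclination/declination/intensity and
# evaluate the free-space sphere response along a short profile with the
# helpers defined above.
xobs = np.linspace(-300., 300., 21)
yobs = np.zeros(21)
zobs = np.ones(21)*150.
B0 = IDTtoxyz(90., 0., 50000.)
Bxa, Bya, Bza = MagSphereFreeSpace(xobs, yobs, zobs, 100., 0., 0., 0.,
                                   chiblk, B0.reshape((1, 3)))
print 'max |Bz| along the profile = ', np.abs(Bza).max()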
| mit |
nhuntwalker/astroML | book_figures/chapter8/fig_total_least_squares.py | 3 | 4653 | """
Total Least Squares Figure
--------------------------
Figure 8.6
A linear fit to data with correlated errors in x and y. In the literature, this
is often referred to as total least squares or errors-in-variables fitting. The
left panel shows the lines of best fit; the right panel shows the likelihood
contours in slope/intercept space. The points are the same set used for the
examples in Hogg, Bovy & Lang 2010.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy import optimize
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from astroML.linear_model import TLS_logL
from astroML.plotting.mcmc import convert_to_stdev
from astroML.datasets import fetch_hogg2010test
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Define some convenience functions
# translate between typical slope-intercept representation,
# and the normal vector representation
def get_m_b(beta):
b = np.dot(beta, beta) / beta[1]
m = -beta[0] / beta[1]
return m, b
def get_beta(m, b):
denom = (1 + m * m)
return np.array([-b * m / denom, b / denom])
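# Hedged sanity-check sketch (not part of the original figure script): the two
# conversions above are inverses of each other for any line with a non-zero
# intercept, which is easy to verify numerically.
_m_check, _b_check = get_m_b(get_beta(2.2, 30.0))
assert np.allclose([_m_check, _b_check], [2.2, 30.0])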
# compute the ellipse principal axes and rotation from covariance
def get_principal(sigma_x, sigma_y, rho_xy):
sigma_xy2 = rho_xy * sigma_x * sigma_y
alpha = 0.5 * np.arctan2(2 * sigma_xy2,
(sigma_x ** 2 - sigma_y ** 2))
tmp1 = 0.5 * (sigma_x ** 2 + sigma_y ** 2)
tmp2 = np.sqrt(0.25 * (sigma_x ** 2 - sigma_y ** 2) ** 2 + sigma_xy2 ** 2)
return np.sqrt(tmp1 + tmp2), np.sqrt(tmp1 - tmp2), alpha
# plot ellipses
def plot_ellipses(x, y, sigma_x, sigma_y, rho_xy, factor=2, ax=None):
if ax is None:
ax = plt.gca()
sigma1, sigma2, alpha = get_principal(sigma_x, sigma_y, rho_xy)
for i in range(len(x)):
ax.add_patch(Ellipse((x[i], y[i]),
factor * sigma1[i], factor * sigma2[i],
alpha[i] * 180. / np.pi,
fc='none', ec='k'))
#------------------------------------------------------------
# We'll use the data from table 1 of Hogg et al. 2010
data = fetch_hogg2010test()
data = data[5:] # no outliers
x = data['x']
y = data['y']
sigma_x = data['sigma_x']
sigma_y = data['sigma_y']
rho_xy = data['rho_xy']
#------------------------------------------------------------
# Find best-fit parameters
X = np.vstack((x, y)).T
dX = np.zeros((len(x), 2, 2))
dX[:, 0, 0] = sigma_x ** 2
dX[:, 1, 1] = sigma_y ** 2
dX[:, 0, 1] = dX[:, 1, 0] = rho_xy * sigma_x * sigma_y
min_func = lambda beta: -TLS_logL(beta, X, dX)
beta_fit = optimize.fmin(min_func,
x0=[-1, 1])
#------------------------------------------------------------
# Plot the data and fits
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.25,
bottom=0.15, top=0.9)
#------------------------------------------------------------
# first let's visualize the data
ax = fig.add_subplot(121)
ax.scatter(x, y, c='k', s=9)
plot_ellipses(x, y, sigma_x, sigma_y, rho_xy, ax=ax)
#------------------------------------------------------------
# plot the best-fit line
m_fit, b_fit = get_m_b(beta_fit)
x_fit = np.linspace(0, 300, 10)
ax.plot(x_fit, m_fit * x_fit + b_fit, '-k')
ax.set_xlim(40, 250)
ax.set_ylim(100, 600)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
#------------------------------------------------------------
# plot the likelihood contour in m, b
ax = fig.add_subplot(122)
m = np.linspace(1.7, 2.8, 100)
b = np.linspace(-60, 110, 100)
logL = np.zeros((len(m), len(b)))
for i in range(len(m)):
for j in range(len(b)):
logL[i, j] = TLS_logL(get_beta(m[i], b[j]), X, dX)
ax.contour(m, b, convert_to_stdev(logL.T),
levels=(0.683, 0.955, 0.997),
colors='k')
ax.set_xlabel('slope')
ax.set_ylabel('intercept')
ax.set_xlim(1.7, 2.8)
ax.set_ylim(-60, 110)
plt.show()
| bsd-2-clause |
vshtanko/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
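# Hedged cross-check sketch (not part of the original example): the alphas
# returned by lars_path use the same scaling as the Lasso estimator, so
# refitting a coordinate-descent Lasso at one alpha of the path should closely
# reproduce that point of the path.
k = len(alphas) // 2
lasso = linear_model.Lasso(alpha=alphas[k], max_iter=10000).fit(X, y)
print("max |coef| difference at alpha=%.4f: %.2e"
      % (alphas[k], np.max(np.abs(lasso.coef_ - coefs[:, k]))))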
| bsd-3-clause |
djpine/pyman | Book/chap8/Problems/test.py | 3 | 2735 | import numpy as np
import matplotlib.pyplot as plt
def LineFitWt(x, y, sig):
"""
Returns slope and y-intercept of weighted linear fit to
(x,y) data set.
Inputs: x and y data arrays and the uncertainty array (sig)
for the y data set.
Outputs: slope and y-intercept of best fit to data and
uncertainties of slope & y-intercept.
"""
sig2 = sig**2
norm = (1./sig2).sum()
xhat = (x/sig2).sum() / norm
yhat = (y/sig2).sum() / norm
slope = ((x-xhat)*y/sig2).sum()/((x-xhat)*x/sig2).sum()
yint = yhat - slope*xhat
sig2_slope = 1./((x-xhat)*x/sig2).sum()
sig2_yint = sig2_slope * (x*x/sig2).sum() / norm
return slope, yint, np.sqrt(sig2_slope), np.sqrt(sig2_yint)
def redchisq(x, y, dy, slope, yint):
chisq = (((y-yint-slope*x)/dy)**2).sum()
return chisq/float(x.size-2)
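# Hedged self-test sketch (not part of the original problem script): generate
# noisy data from a known straight line and confirm that the weighted fit
# above recovers the slope and intercept to within a few standard errors. The
# values below are illustrative only.
_rng = np.random.RandomState(0)
_x = np.linspace(0., 10., 20)
_sig = 0.5 * np.ones_like(_x)
_y = 3.0 * _x - 2.0 + _rng.normal(0., 0.5, _x.size)
_slope, _yint, _dslope, _dyint = LineFitWt(_x, _y, _sig)
print("recovered slope = {0:0.2f} +/- {1:0.2f}".format(_slope, _dslope))
print("recovered y-intercept = {0:0.2f} +/- {1:0.2f}".format(_yint, _dyint))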
# Read data from data file
t, V, dV = np.loadtxt("RLcircuit.txt", skiprows=2, unpack=True)
########## Code to tranform & fit data starts here ##########
# Transform data and parameters from ln V = ln V0 - Gamma t
# to linear form: Y = A + B*X, where Y = ln V, X = t, dY = dV/V
X = t # transform t data for fitting (not needed as X=t)
Y = np.log(V) # transform V data for fitting
dY = dV/V # transform uncertainties for fitting
# Fit transformed data X, Y, dY to obtain fitting parameters
# B & A. Also returns uncertainties dA & dB in B & A
B, A, dB, dA = LineFitWt(X, Y, dY)
# Return reduced chi-squared
redchisqr = redchisq(X, Y, dY, B, A)
# Determine fitting parameters for original exponential function
# V = V0 exp(-Gamma t) ...
V0 = np.exp(A)
Gamma = -B
# ... and their uncertainties
dV0 = V0 * dA
dGamma = dB
###### Code to plot transformed data and fit starts here ######
# Create line corresponding to fit using fitting parameters
# Only two points are needed to specify a straight line
Xext = 0.05*(X.max()-X.min())
Xfit = np.array([X.min()-Xext, X.max()+Xext]) # smallest & largest X points
Yfit = B*Xfit + A # generates Y from X data &
# fitting function
plt.errorbar(X, Y, dY, fmt="b^")
plt.plot(Xfit, Yfit, "c-", zorder=-1)
plt.title(r"$\mathrm{Fit\ to:}\ \ln V = \ln V_0-\Gamma t$ or $Y = A + BX$")
plt.xlabel('time (ns)')
plt.ylabel('ln voltage (volts)')
plt.xlim(-50, 550)
plt.text(210, 1.5, u"A = ln V0 = {0:0.4f} \xb1 {1:0.4f}".format(A, dA))
plt.text(210, 1.1, u"B = -Gamma = {0:0.4f} \xb1 {1:0.4f} /ns".format(B, dB))
plt.text(210, 0.7, "$\chi_r^2$ = {0:0.3f}".format(redchisqr))
plt.text(210, 0.3, u"V0 = {0:0.2f} \xb1 {1:0.2f} V".format(V0, dV0))
plt.text(210, -0.1,u"Gamma = {0:0.4f} \xb1 {1:0.4f} /ns".format(Gamma, dGamma))
plt.savefig("RLcircuit.pdf")
plt.show() | cc0-1.0 |
pv/scikit-learn | sklearn/tests/test_cross_validation.py | 70 | 41943 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
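# Hedged illustrative addition (not part of the original test suite): a
# minimal smoke test exercising the two helpers above on a plain KFold split.
def test_check_cv_coverage_smoke():
    cv = cval.KFold(20, 4)
    check_cv_coverage(cv, expected_n_iter=4, n_samples=20)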
def test_kfold_valueerrors():
# Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with a
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with warnings.catch_warnings(record=True):
# deprecated sequence of sequence format
cv = cval.check_cv(3, X, y_seq_of_seqs, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
cv = cval.check_cv(3, X, y_indicator_matrix, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
dangeles/WormFiles | applomics/src/applomics_analysis_160508.py | 1 | 1800 | """
A script to analyze applomics data.
author: [email protected]
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df1 = pd.read_csv('../input/apple_inoculation_expt_160508.csv')
# count colonies adjusted for dilution and volume plated
df1['cfu_per_apple'] = df1.colonies * \
df1.dilution_factor/df1.volume_plated*df1.apple_mass + 5
df1.dropna(inplace=True)
# plot cfu vs inoculation factor
fig, ax = plt.subplots()
df1[df1.worms == 0].plot('inculation_factor', 'cfu_per_apple', 'scatter',
logx=True, logy=True)
df1[df1.worms == 1].plot('inculation_factor', 'cfu_per_apple', 'scatter',
logx=True, logy=True)
plt.show()
# Since 10**-8 seems like an odd value, remove it from this experiment.
df2 = df1[df1.inculation_factor > 10**-8].copy()
mean_growth_7 = df1[(df1.worms == 0) &
(df1.inculation_factor == 10**-7)].cfu_per_apple.mean()
mean_growth_6 = df1[(df1.worms == 0) &
(df1.inculation_factor == 10**-6)].cfu_per_apple.mean()
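# Normalize each sample by the mean no-worm CFU at its inoculation factor.
# np.repeat builds a divisor of six copies of the 10**-6 mean followed by six
# copies of the 10**-7 mean; this assumes df2 rows are grouped in that order.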
divider = np.repeat([mean_growth_6, mean_growth_7], [6, 6])
df2['fold_change'] = df2.cfu_per_apple/divider
plt.plot(df2[df2.worms == 0].inculation_factor,
df2[df2.worms == 0].fold_change, 'bo', ms=10, alpha=0.65,
label='No Worms/Mean(No Worms)')
plt.plot(df2[df2.worms == 1].inculation_factor,
df2[df2.worms == 1].fold_change, 'ro', ms=10, alpha=0.65,
label='Worms/Mean(No Worms)')
plt.xlim(5*10**-8, 2*10**-6)
plt.xscale('log')
plt.ylabel('Fold Change (worms/no worms)')
plt.xlabel('Inoculation Factor (dilution from sat. soln)')
plt.title('Effect of Worms on Bacteria')
plt.legend()
plt.savefig('../output/Fold_Change_Applomics_160508_Expt1.pdf')
plt.show()
| mit |
mwickert/SP-Comm-Tutorial-using-scikit-dsp-comm | hardware_configure/sigsys.py | 1 | 81778 | """
Signals and Systems Function Module
Copyright (c) March 2017, Mark Wickert
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
"""
Notes
-----
The primary purpose of this function library is to support the book Signals and Systems for Dummies. Beyond that it should be useful to anyone who wants to use Pylab for general signals and systems modeling and simulation. There is a good collection of digital communication simulation primitives included in the library. More enhancements are planned over time.
The formatted docstrings for the library follow. Click index in the upper right to get an
alphabetical listing of the library functions. In all of the example code given it is assumed that ssd has been imported into your workspace. See the examples below for import options.
Examples
--------
>>> import ssd
>>> # Commands then need to be prefixed with ssd., i.e.,
>>> ssd.tri(t,tau)
>>> # A full import of the module, to avoid the need to prefix with ssd, is:
>>> from ssd import *
Function Catalog
----------------
"""
from matplotlib import pylab
from matplotlib import mlab
import numpy as np
from numpy import fft
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
def CIC(M,K):
"""
    A functional form implementation of a cascade of integrator comb (CIC)
    filters. Commonly used in multirate signal processing digital
    down-converters and digital up-converters. A true CIC filter requires no
    multiplies, only add and subtract operations. The functional form created
    here is a simple FIR requiring real coefficient multiplies via filter().
    Parameters
    ----------
    M : Effective number of taps per section (typically the decimation
        factor).
    K : The number of CIC sections cascaded (larger K gives the filter a
        wider image rejection bandwidth).
    Returns
    -------
    b : FIR filter coefficients for a simple direct form implementation
        using the filter() function.
    Mark Wickert November 2007
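    Examples
    --------
    A minimal usage sketch; the decimation factor of 8 and the two cascaded
    sections below are illustrative values, not requirements.
    >>> b = CIC(8,2)
    >>> # apply to a signal x using scipy.signal, e.g.,
    >>> # y = signal.lfilter(b,1,x)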
"""
if K == 1:
b = np.ones(M)
else:
h = np.ones(M)
b = h
for i in range(1,K):
b = signal.convolve(b,h) # cascade by convolving impulse responses
# Make filter have unity gain at DC
return b/np.sum(b)
def ten_band_eq_filt(x,GdB,Q=3.5):
"""
Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB.
The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and
stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate
is assumed to be 44.1 kHz.
Parameters
----------
x : ndarray of the input signal samples
GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB]
Q : Quality factor vector for each of the NB peaking filters
Returns
-------
y : ndarray of output signal samples
Examples
--------
>>> # Test with white noise
>>> w = randn(100000)
    >>> y = ten_band_eq_filt(w,GdB)
>>> psd(y,2**10,44.1)
"""
fs = 44100.0 # Hz
NB = len(GdB)
Fc = 31.25*2**np.arange(10)
B = np.zeros((NB,3))
A = np.zeros((NB,3))
# Create matrix of cascade coefficients
for k in range(NB):
[b,a] = peaking(GdB[k],Fc[k],Q)
B[k,:] = b
A[k,:] = a
#Pass signal x through the cascade of ten filters
y = np.zeros(len(x))
for k in range(NB):
if k == 0:
y = signal.lfilter(B[k,:],A[k,:],x)
else:
y = signal.lfilter(B[k,:],A[k,:],y)
return y
def ten_band_eq_resp(GdB,Q=3.5):
"""
Create a frequency response magnitude plot in dB of a ten band equalizer
using a semilogplot (semilogx()) type plot
Parameters
----------
GdB : Gain vector for 10 peaking filters [G0,...,G9]
Q : Quality factor for each peaking filter (default 3.5)
Returns
-------
Nothing : two plots are created
Examples
--------
>>> ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0])
"""
fs = 44100.0 # Hz
Fc = 31.25*2**np.arange(10)
NB = len(GdB)
B = np.zeros((NB,3));
A = np.zeros((NB,3));
# Create matrix of cascade coefficients
for k in range(NB):
b,a = peaking(GdB[k],Fc[k],Q,fs)
B[k,:] = b
A[k,:] = a
# Create the cascade frequency response
F = np.logspace(1,np.log10(20e3),1000)
H = np.ones(len(F))*np.complex(1.0,0.0)
for k in range(NB):
w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs)
H *= Htemp
plt.figure(figsize=(6,4))
plt.subplot(211)
plt.semilogx(F,20*np.log10(abs(H)))
plt.axis([10, fs/2, -12, 12])
plt.grid()
plt.title('Ten-Band Equalizer Frequency Response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.subplot(212)
plt.stem(np.arange(NB),GdB,'b','bs')
#plt.bar(np.arange(NB)-.1,GdB,0.2)
plt.axis([0, NB-1, -12, 12])
plt.xlabel('Equalizer Band Number')
plt.ylabel('Gain Set (dB)')
plt.grid()
def peaking(GdB, fc, Q=3.5, fs=44100.):
"""
    A second-order peaking filter having GdB gain at fc and approximately
    0 dB otherwise.
The filter coefficients returns correspond to a biquadratic system function
containing five parameters.
Parameters
----------
    GdB : Peaking gain at fc in dB
fc : Center frequency in Hz
Q : Filter Q which is inversely proportional to bandwidth
    fs : Sampling frequency in Hz
Returns
-------
b : ndarray containing the numerator filter coefficients
a : ndarray containing the denominator filter coefficients
Examples
--------
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> b,a = peaking(-5.0,500,4)
>>> # Assuming pylab imported
>>> f = logspace(1,5,400)
    >>> w,H = signal.freqz(b,a,2*pi*f/44100)
>>> semilogx(f,20*log10(abs(H)))
"""
mu = 10**(GdB/20.)
kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q))
Cpk = (1 + kq *mu)/(1 + kq)
b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu)
b2 = (1 - kq*mu)/(1 + kq*mu)
a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq)
a2 = (1 - kq)/(1 + kq)
b = Cpk*np.array([1, b1, b2])
a = np.array([1, a1, a2])
return b,a
def ex6_2(n):
"""
Generate a triangle pulse as described in Example 6-2
of Chapter 6.
You need to supply an index array n that covers at least [-2, 5].
The function returns the hard-coded signal of the example.
Parameters
----------
n : time index ndarray covering at least -2 to +5.
Returns
-------
x : ndarray of signal samples in x
Examples
--------
>>> n = arange(-5,8)
>>> x = ex6_2(n)
>>> stem(n,x) # creates a stem plot of x vs n
"""
x = np.zeros(len(n))
for k, nn in enumerate(n):
if nn >= -2 and nn <= 5:
x[k] = 8 - nn
return x
def position_CD(Ka,out_type = 'fb_exact'):
"""
CD sled position control case study of Chapter 18.
The function returns the closed-loop and open-loop
system function for a CD/DVD sled position control
system. The loop amplifier gain is the only variable
that may be changed. The returned system function can
however be changed.
Parameters
----------
Ka : loop amplifier gain, start with 50.
out_type : 'open_loop' for open loop system function
out_type : 'fb_approx' for closed-loop approximation
out_type : 'fb_exact' for closed-loop exact
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
With the exception of the loop amplifier gain, all
other parameters are hard-coded from Case Study example.
Examples
------
>>> b,a = position_CD(Ka,'fb_approx')
>>> b,a = position_CD(Ka,'fb_exact')
"""
rs = 10/(2*np.pi)
# Load b and a ndarrays with the coefficients
if out_type.lower() == 'open_loop':
b = np.array([Ka*4000*rs])
a = np.array([1,1275,31250,0])
elif out_type.lower() == 'fb_approx':
b = np.array([3.2*Ka*rs])
a = np.array([1, 25, 3.2*Ka*rs])
elif out_type.lower() == 'fb_exact':
b = np.array([4000*Ka*rs])
a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs])
else:
print('out_type must be: open_loop, fb_approx, or fc_exact')
return 1
return b, a
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'):
"""
Cruise control with PI controller and hill disturbance.
This function returns various system function configurations
    for the cruise control Case Study example found in
    the supplementary article. The plant model is obtained by
    linearizing the equations of motion and the controller contains a
    proportional and integral gain term set via the closed-loop parameters
    natural frequency wn (rad/s) and damping zeta.
Parameters
----------
wn : closed-loop natural frequency in rad/s, nominally 0.1
zeta : closed-loop damping factor, nominally 1.0
T : vehicle time constant, nominally 10 s
vcruise : cruise velocity set point, nominally 75 mph
vmax : maximum vehicle velocity, nominally 120 mph
tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function
'H' : closed-loop system function V(s)/R(s)
'HE' : closed-loop system function E(s)/R(s)
'HVW' : closed-loop system function V(s)/W(s)
'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Examples
--------
>>> # return the closed-loop system function output/input velocity
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H')
>>> # return the closed-loop system function loop error/hill disturbance
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED')
"""
tau = T/2.*vmax/vcruise
g = 9.8
g *= 3*60**2/5280. # m/s to mph conversion
Kp = T/vmax*(2*zeta*wn-1/tau)
Ki = T/vmax*wn**2
K = Kp*vmax/T
print('wn = ', np.sqrt(K/(Kp/Ki)))
print('zeta = ', (K + 1/tau)/(2*wn))
a = np.array([1, 2*zeta*wn, wn**2])
if tf_mode == 'H':
b = np.array([K, wn**2])
elif tf_mode == 'HE':
b = np.array([1, 2*zeta*wn-K, 0.])
elif tf_mode == 'HVW':
b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)])
b *= Kp
elif tf_mode == 'HED':
b = np.array([g, 0])
else:
print('tf_mode must be: H, HE, HVU, or HED')
return 1
return b, a
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]):
"""
Create an s-plane pole-zero plot.
As input the function uses the numerator and denominator
s-domain system function coefficient ndarrays b and a respectively.
Assumed to be stored in descending powers of s.
Parameters
----------
b : numerator coefficient ndarray.
a : denominator coefficient ndarray.
auto_scale : True
size : [xmin,xmax,ymin,ymax] plot scaling when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> splane(b,a)
>>> # Here the plot is generated using manual scaling
>>> splane(b,a,False,[-10,1,-10,10])
"""
M = len(b) - 1
N = len(a) - 1
plt.figure(figsize=(5,5))
#plt.axis('equal')
N_roots = np.array([0.0])
if M > 0:
N_roots = np.roots(b)
D_roots = np.array([0.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5
size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5
size[1] = max(size[1],0.5)
size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5
size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5
plt.plot([size[0],size[1]],[0,0],'k--')
plt.plot([0,0],[size[2],size[3]],'r--')
# Plot labels if multiplicity greater than 1
x_scale = size[1]-size[0]
y_scale = size[3]-size[2]
x_off = 0.03
y_off = 0.01
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = mlab.find(N_mult>1)
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = mlab.find(D_mult>1)
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis(np.array(size))
return M,N
def OS_filter(x,h,N,mode=0):
"""
Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
    >>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
>>> y = OS_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = OS_filter(x,h,N,1)
"""
P = len(h)
# zero pad start of x so first frame can recover first true samples of x
x = np.hstack((np.zeros(P-1),x))
L = N - P + 1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad end of x to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and save behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:k*L+N]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk)) # imag part should be zero
y[k*L+P-1:k*L+N] = yk[P-1:]
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[P-1:Nx], y_mat[:,P-1:Nx]
else:
return y[P-1:Nx]
def OA_filter(x,h,N,mode=0):
"""
Overlap and add transform domain FIR filtering.
This function implements the classical overlap and add method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
    >>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
>>> y = OA_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = OA_filter(x,h,N,1)
"""
P = len(h)
L = N - P + 1 # need N >= L + P -1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and add behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:(k+1)*L]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk))
y[k*L:k*L+N] += yk
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[0:Nx], y_mat[:,0:Nx]
else:
return y[0:Nx]
def lp_samp(fb,fs,fmax,N,shape='tri',fsize=(6,4)):
"""
Lowpass sampling theorem plotting function.
Display the spectrum of a sampled signal after setting the bandwidth,
sampling frequency, maximum display frequency, and spectral shape.
Parameters
----------
fb : spectrum lowpass bandwidth in Hz
fs : sampling frequency in Hz
fmax : plot over [-fmax,fmax]
    N : number of translates, N positive and N negative
    shape : 'tri' or 'line'
fsize : the size of the figure window, default (6,4)
Returns
-------
Nothing : A plot window opens containing the spectrum plot
Examples
--------
>>> # No aliasing as 10 < 25/2
>>> lp_samp(10,25,50,10)
>>> # Aliasing as 15 > 25/2
>>> lp_samp(15,25,50,10)
"""
plt.figure(figsize=fsize)
# define the plot interval
f = np.arange(-fmax,fmax+fmax/200.,fmax/200.)
A = 1.0;
line_ampl = A/2.*np.array([0, 1])
# plot the lowpass spectrum in black
if shape.lower() == 'tri':
plt.plot(f,lp_tri(f,fb))
elif shape.lower() == 'line':
plt.plot([fb, fb],line_ampl,'b', linewidth=2)
plt.plot([-fb, -fb],line_ampl,'b', linewidth=2)
else:
print('shape must be tri or line')
# overlay positive and negative frequency translates
for n in range(N):
if shape.lower() == 'tri':
plt.plot(f,lp_tri(f-(n+1)*fs,fb),'--r')
plt.plot(f,lp_tri(f+(n+1)*fs,fb),'--g')
elif shape.lower() == 'line':
plt.plot([fb+(n+1)*fs, fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([-fb+(n+1)*fs, -fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([fb-(n+1)*fs, fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
plt.plot([-fb-(n+1)*fs, -fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
else:
print('shape must be tri or line')
#plt.title('Lowpass Sampling Theorem for a Real Signal: Blk = orig, dotted = translates')
plt.ylabel('Spectrum Magnitude')
plt.xlabel('Frequency in Hz')
plt.axis([-fmax,fmax,0,1])
plt.grid()
def lp_tri(f, fb):
"""
Triangle spectral shape function used by lp_spec.
This is a support function for the lowpass spectrum plotting function
lp_spec().
Parameters
----------
f : ndarray containing frequency samples
fb : the bandwidth as a float constant
Returns
-------
x : ndarray of spectrum samples for a single triangle shape
Examples
--------
>>> x = lp_tri(f, fb)
"""
x = np.zeros(len(f))
for k in range(len(f)):
if abs(f[k]) <= fb:
x[k] = 1 - abs(f[k])/float(fb)
return x
def sinusoidAWGN(x,SNRdB):
"""
Add white Gaussian noise to a single real sinusoid.
Input a single sinusoid to this function and it returns a noisy
sinusoid at a specific SNR value in dB. Sinusoid power is calculated
using np.var.
Parameters
----------
x : Input signal as ndarray consisting of a single sinusoid
SNRdB : SNR in dB for output sinusoid
Returns
-------
y : Noisy sinusoid return vector
Examples
--------
>>> # set the SNR to 10 dB
>>> n = arange(0,10000)
>>> x = cos(2*pi*0.04*n)
>>> y = sinusoidAWGN(x,10.0)
"""
# Estimate signal power
x_pwr = np.var(x)
# Create noise vector
noise = np.sqrt(x_pwr/10**(SNRdB/10.))*np.random.randn(len(x));
return x + noise
def simpleQuant(x,Btot,Xmax,Limit):
"""
A simple rounding quantizer for bipolar signals having Btot = B + 1 bits.
This function models a quantizer that employs Btot bits that has one of
three selectable limiting types: saturation, overflow, and none.
The quantizer is bipolar and implements rounding.
Parameters
----------
x : input signal ndarray to be quantized
Btot : total number of bits in the quantizer, e.g. 16
Xmax : quantizer full-scale dynamic range is [-Xmax, Xmax]
Limit = Limiting of the form 'sat', 'over', 'none'
Returns
-------
xq : quantized output ndarray
Notes
-----
    The quantization error can be formed as e = xq - x
Examples
--------
>>> n = arange(0,10000)
>>> x = cos(2*pi*0.211*n)
>>> y = sinusoidAWGN(x,90)
    >>> yq = simpleQuant(y,12,1,'sat')
>>> psd(y,2**10,Fs=1);
>>> psd(yq,2**10,Fs=1)
"""
B = Btot-1
x = x/Xmax
if Limit.lower() == 'over':
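        # wrap-around (overflow) limiting implemented with modulo 2**Btot arithmetic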
xq = (np.mod(np.round(x*2**B)+2**B,2**Btot)-2**B)/2**B
elif Limit.lower() == 'sat':
xq = np.round(x*2**B)+2**B
s1 = mlab.find(xq >= 2**Btot-1)
s2 = mlab.find(xq < 0)
xq[s1] = (2**Btot - 1)*np.ones(len(s1))
xq[s2] = np.zeros(len(s2))
xq = (xq - 2**B)/2**B
elif Limit.lower() == 'none':
xq = np.round(x*2**B)/2**B
else:
print('limit must be the string over, sat, or none')
return xq*Xmax
def prin_alias(f_in,fs):
"""
Calculate the principle alias frequencies.
Given an array of input frequencies the function returns an
array of principle alias frequencies.
Parameters
----------
f_in : ndarray of input frequencies
fs : sampling frequency
Returns
-------
f_out : ndarray of principle alias frequencies
Examples
--------
>>> # Linear frequency sweep from 0 to 50 Hz
>>> f_in = arange(0,50,0.1)
>>> # Calculate principle alias with fs = 10 Hz
>>> f_out = prin_alias(f_in,10)
"""
return abs(np.rint(f_in/fs)*fs - f_in)
"""
Principle alias via recursion
f_out = np.copy(f_in)
for k in range(len(f_out)):
while f_out[k] > fs/2.:
f_out[k] = abs(f_out[k] - fs)
return f_out
"""
def cascade_filters(b1,a1,b2,a2):
"""
Cascade two IIR digital filters into a single (b,a) coefficient set.
To cascade two digital filters (system functions) given their numerator
and denominator coefficients you simply convolve the coefficient arrays.
Parameters
----------
b1 : ndarray of numerator coefficients for filter 1
a1 : ndarray of denominator coefficients for filter 1
b2 : ndarray of numerator coefficients for filter 2
a2 : ndarray of denominator coefficients for filter 2
Returns
-------
b : ndarray of numerator coefficients for the cascade
a : ndarray of denominator coefficients for the cascade
Examples
--------
>>> from scipy import signal
>>> b1,a1 = signal.butter(3, 0.1)
>>> b2,a2 = signal.butter(3, 0.15)
>>> b,a = cascade_filters(b1,a1,b2,a2)
"""
return signal.convolve(b1,b2), signal.convolve(a1,a2)
def soi_snoi_gen(s,SIR_dB,N,fi,fs = 8000):
"""
Add an interfering sinusoidal tone to the input signal at a given SIR_dB.
    The input is the signal of interest (SOI) and a number of sinusoid signals
    not of interest (SNOI) are added to the SOI at a prescribed signal-to-
    interference SIR level in dB.
Parameters
----------
s : ndarray of signal of SOI
SIR_dB : interference level in dB
N : Trim input signal s to length N + 1 samples
    fi : ndarray of interference frequencies in Hz
fs : sampling rate in Hz, default is 8000 Hz
Returns
-------
    r : ndarray of combined signal plus interference of length N+1 samples
Examples
--------
>>> # load a speech ndarray and trim to 5*8000 + 1 samples
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
"""
n = np.arange(0,N+1)
K = len(fi)
si = np.zeros(N+1)
for k in range(K):
si += np.cos(2*np.pi*fi[k]/fs*n);
s = s[:N+1]
Ps = np.var(s)
Psi = np.var(si)
r = s + np.sqrt(Ps/Psi*10**(-SIR_dB/10))*si
return r
def lms_ic(r,M,mu,delta=1):
"""
Least mean square (LMS) interference canceller adaptive filter.
A complete LMS adaptive filter simulation function for the case of
interference cancellation. Used in the digital filtering case study.
Parameters
----------
    r : ndarray of the noisy (with interference) input signal
    M : FIR Filter length (order M-1)
    mu : LMS step-size
    delta : decorrelation delay between input and FIR filter input, default 1
Returns
-------
n : ndarray Index vector
r : ndarray noisy (with interference) input signal
r_hat : ndarray filtered output (NB_hat[n])
e : ndarray error sequence (WB_hat[n])
ao : ndarray final value of weight vector
F : ndarray frequency response axis vector
Ao : ndarray frequency response of filter
Examples
----------
>>> # import a speech signal
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> # add interference at 1kHz and 1.5 kHz and
>>> # truncate to 5 seconds
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
>>> # simulate with a 64 tap FIR and mu = 0.005
>>> n,r,r_hat,e,ao,F,Ao = lms_ic(r,64,0.005)
"""
N = len(r)-1;
# Form the reference signal y via delay delta
y = signal.lfilter(np.hstack((np.zeros(delta), np.array([1]))),1,r)
# Initialize output vector x_hat to zero
r_hat = np.zeros(N+1)
# Initialize error vector e to zero
e = np.zeros(N+1)
# Initialize weight vector to zero
ao = np.zeros(M+1)
# Initialize filter memory to zero
z = np.zeros(M)
# Initialize a vector for holding ym of length M+1
ym = np.zeros(M+1)
for k in range(N+1):
# Filter one sample at a time
r_hat[k],z = signal.lfilter(ao,np.array([1]),np.array([y[k]]),zi=z)
# Form the error sequence
e[k] = r[k] - r_hat[k]
# Update the weight vector
ao = ao + 2*mu*e[k]*ym
# Update vector used for correlation with e(k)
ym = np.hstack((np.array([y[k]]), ym[:-1]))
# Create filter frequency response
F, Ao = signal.freqz(ao,1,1024)
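    # freqz returns the frequency axis in rad/sample; scale to cycles/sample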
F/= (2*np.pi)
Ao = 20*np.log10(abs(Ao))
return np.arange(0,N+1), r, r_hat, e, ao, F, Ao
def fir_iir_notch(fi,fs,r=0.95):
"""
Design a second-order FIR or IIR notch filter.
A second-order FIR notch filter is created by placing conjugate
    zeros on the unit circle at an angle corresponding to the notch center
frequency. The IIR notch variation places a pair of conjugate poles
at the same angle, but with radius r < 1 (typically 0.9 to 0.95).
Parameters
----------
    fi : notch frequency in Hz relative to fs
fs : the sampling frequency in Hz, e.g. 8000
r : pole radius for IIR version, default = 0.95
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
If the pole radius is 0 then an FIR version is created, that is
there are no poles except at z = 0.
Examples
--------
>>> b_FIR, a_FIR = fir_iir_notch(1000,8000,0)
>>> b_IIR, a_IIR = fir_iir_notch(1000,8000)
"""
w0 = 2*np.pi*fi/float(fs)
if r >= 1:
print('Poles on or outside unit circle.')
if r == 0:
a = np.array([1.0])
else:
a = np.array([1, -2*r*np.cos(w0), r**2])
b = np.array([1, -2*np.cos(w0), 1])
return b, a
def simple_SA(x,NS,NFFT,fs,NAVG=1,window='boxcar'):
"""
Spectral estimation using windowing and averaging.
    This function implements averaged periodogram spectral estimation
    similar to the matplotlib psd() function, but more
    specialized for the windowing case study of Chapter 16.
Parameters
----------
x : ndarray containing the input signal
NS : The subrecord length less zero padding, e.g. NS < NFFT
NFFT : FFT length, e.g., 1024 = 2**10
fs : sampling rate in Hz
NAVG : the number of averages, e.g., 1 for deterministic signals
window : hardcoded window 'boxcar' (default) or 'hanning'
Returns
-------
f : ndarray frequency axis in Hz on [0, fs/2]
Sx : ndarray the power spectrum estimate
Notes
-----
The function also prints the maximum number of averages K possible
for the input data record.
Examples
--------
>>> n = arange(0,2048)
>>> x = cos(2*pi*1000/10000*n) + 0.01*cos(2*pi*3000/10000*n)
>>> f, Sx = simple_SA(x,128,512,10000)
>>> f, Sx = simple_SA(x,256,1024,10000,window='hanning')
>>> plot(f, 10*log10(Sx))
"""
Nx = len(x)
K = Nx/NS
print('K = ', K)
if NAVG > K:
print('NAVG exceeds number of available subrecords')
return 0,0
if window.lower() == 'boxcar' or window.lower() == 'rectangle':
w = signal.boxcar(NS)
elif window.lower() == 'hanning':
w = signal.hanning(NS)
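    # window each of the K length-NS subrecords; only the first NAVG are used below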
xsw = np.zeros((K,NS)) + 1j*np.zeros((K,NS))
for k in range(NAVG):
xsw[k,] = w*x[k*NS:(k+1)*NS]
Sx = np.zeros(NFFT)
for k in range(NAVG):
X = fft.fft(xsw[k,],NFFT)
Sx += abs(X)**2
Sx /= float(NAVG)
Sx /= float(NFFT**2)
if x.dtype != 'complex128':
n = np.arange(NFFT/2)
f = fs*n/float(NFFT)
Sx = Sx[0:NFFT/2]
else:
n = np.arange(NFFT/2)
f = fs*np.hstack((np.arange(-NFFT/2,0),np.arange(NFFT/2)))/float(NFFT)
Sx = np.hstack((Sx[NFFT/2:],Sx[0:NFFT/2]))
return f, Sx
def line_spectra(fk,Xk,mode,sides=2,linetype='b',lwidth=2,floor_dB=-100,fsize=(6,4)):
"""
    Plot the Fourier series line spectra given the coefficients.
This function plots two-sided and one-sided line spectra of a periodic
signal given the complex exponential Fourier series coefficients and
the corresponding harmonic frequencies.
Parameters
----------
fk : vector of real sinusoid frequencies
Xk : magnitude and phase at each positive frequency in fk
    mode : 'mag' => magnitude plot, 'magdB' => magnitude in dB plot,
           'magdBn' => magnitude in dB normalized, 'phase' => a phase plot in radians
sides : 2; 2-sided or 1-sided
linetype : line type per Matplotlib definitions, e.g., 'b';
lwidth : 2; linewidth in points
fsize : optional figure size in inches, default = (6,4) inches
Returns
-------
Nothing : A plot window opens containing the line spectrum plot
Notes
-----
Since real signals are assumed the frequencies of fk are 0 and/or positive
numbers. The supplied Fourier coefficients correspond.
Examples
--------
>>> n = arange(0,25)
>>> # a pulse train with 10 Hz fundamental and 20% duty cycle
>>> fk = n*10
>>> Xk = sinc(n*10*.02)*exp(-1j*2*pi*n*10*.01) # 1j = sqrt(-1)
>>> line_spectra(fk,Xk,'mag')
>>> line_spectra(fk,Xk,'phase')
"""
plt.figure(figsize=fsize)
# Eliminate zero valued coefficients
idx = pylab.find(Xk != 0)
Xk = Xk[idx]
fk = fk[idx]
if mode == 'mag':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, 2.*np.abs(Xk[k])],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), 0, 1.05*max(abs(Xk))])
elif sides == 1:
plt.axis([0, 1.2*max(fk), 0, 1.05*2*max(abs(Xk))])
else:
print('Invalid sides type')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdB':
Xk_dB = 20*np.log10(np.abs(Xk))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
print('Invalid sides type')
plt.ylabel('Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdBn':
Xk_dB = 20*np.log10(np.abs(Xk)/max(np.abs(Xk)))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
print('Invalid sides type')
plt.ylabel('Normalized Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'phase':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, -np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
if sides == 2:
plt.plot([-1.2*max(fk), 1.2*max(fk)], [0, 0],'k')
plt.axis([-1.2*max(fk), 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
elif sides == 1:
plt.plot([0, 1.2*max(fk)], [0, 0],'k')
plt.axis([0, 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
else:
print('Invalid sides type')
plt.ylabel('Phase (rad)')
plt.xlabel('Frequency (Hz)')
else:
print('Invalid mode type')
def fs_coeff(xp,N,f0,one_side=True):
"""
Numerically approximate the Fourier series coefficients given periodic x(t).
    The input is assumed to represent one period of the waveform
x(t) that has been uniformly sampled. The number of samples supplied
to represent one period of the waveform sets the sampling rate.
Parameters
----------
xp : ndarray of one period of the waveform x(t)
N : maximum Fourier series coefficient, [0,...,N]
f0 : fundamental frequency used to form fk.
Returns
-------
Xk : ndarray of the coefficients over indices [0,1,...,N]
fk : ndarray of the harmonic frequencies [0, f0,2f0,...,Nf0]
Notes
-----
len(xp) >= 2*N+1 as len(xp) is the fft length.
Examples
--------
>>> t = arange(0,1,1/1024.)
>>> # a 20% duty cycle pulse starting at t = 0
>>> x_rect = rect(t-.1,0.2)
>>> Xk, fk = fs_coeff(x_rect,25,10)
>>> # plot the spectral lines
>>> line_spectra(fk,Xk,'mag')
"""
Nint = len(xp)
if Nint < 2*N+1:
print('Number of samples in xp insufficient for requested N.')
return 0,0
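    # The DFT of one period, scaled by 1/Nint, numerically approximates the Fourier series coefficients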
Xp = fft.fft(xp,Nint)/float(Nint)
# To interface with the line_spectra function use one_side mode
if one_side:
Xk = Xp[0:N+1]
fk = f0*np.arange(0,N+1)
else:
Xk = np.hstack((Xp[-N:],Xp[0:N+1]))
fk = f0*np.arange(-N,N+1)
return Xk, fk
def fs_approx(Xk,fk,t):
"""
Synthesize periodic signal x(t) using Fourier series coefficients at harmonic frequencies
Assume the signal is real so coefficients Xk are supplied for nonnegative
    indices. The negative index coefficients are assumed to be complex
conjugates.
Parameters
----------
Xk : ndarray of complex Fourier series coefficients
fk : ndarray of harmonic frequencies in Hz
t : ndarray time axis corresponding to output signal array x_approx
Returns
-------
x_approx : ndarray of periodic waveform approximation over time span t
Examples
--------
>>> t = arange(0,2,.002)
>>> # a 20% duty cycle pulse train
>>> n = arange(0,20,1) # 0 to 19th harmonic
    >>> fk = 1*n # period = 1s
    >>> # Xk as computed by fs_coeff() for the pulse train
    >>> x_approx = fs_approx(Xk,fk,t)
>>> plot(t,x_approx)
"""
x_approx = np.zeros(len(t))
for k,Xkk in enumerate(Xk):
if fk[k] == 0:
x_approx += Xkk*np.ones(len(t))
else:
x_approx += 2*np.abs(Xkk)*np.cos(2*np.pi*fk[k]*t+np.angle(Xkk))
return x_approx
def conv_sum(x1,nx1,x2,nx2,extent=('f','f')):
"""
Discrete convolution of x1 and x2 with proper tracking of the output time axis.
    Convolve two discrete-time signals using the SciPy function signal.convolve.
    The time (sequence) axes are managed from input to output. y[n] = x1[n]*x2[n].
Parameters
----------
x1 : ndarray of signal x1 corresponding to nx1
nx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to nx2
nx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ny : ndarray of the corresponding sequence index n
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The default
extents of ('f','f') are used for signals that are active (have support)
on or within n1 and n2 respectively. A right-sided signal such as
a^n*u[n] is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> nx = arange(-5,10)
>>> x = drect(nx,4)
>>> y,ny = conv_sum(x,nx,x,nx)
>>> stem(ny,y)
>>> # Consider a pulse convolved with an exponential ('r' type extent)
>>> h = 0.5**nx*dstep(nx)
>>> y,ny = conv_sum(x,nx,h,nx,('f','r')) # note extents set
>>> stem(ny,y) # expect a pulse charge and discharge sequence
"""
nnx1 = np.arange(0,len(nx1))
nnx2 = np.arange(0,len(nx2))
n1 = nnx1[0]
n2 = nnx1[-1]
n3 = nnx2[0]
n4 = nnx2[-1]
    # Start by finding the valid output support or extent interval to ensure that
    # for non-finite extent signals ambiguous results are not returned.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n4+1-1)
ny = np.arange(0,len(x1)+len(x2)-1) + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
nny = np.arange(n1+n3,n1+1+n4+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n3+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
nny = np.arange(n2+n3,n2+1+n4+1-1)
ny = nny + nx1[-1]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
nny = np.arange(n1+n4,n2+1+n4+1-1)
        ny = nny + nx1[0]+nx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
nny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
nny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ny = nny + max(nx1[0]+nx2[-1],nx1[-1]+nx2[0])
else:
print('Invalid x1 x2 extents specified or valid extent not found!')
return 0,0
# Finally convolve the sequences
y = signal.convolve(x1, x2)
print('Output support: (%+d, %+d)' % (ny[0],ny[-1]))
return y[nny], ny
def conv_integral(x1,tx1,x2,tx2,extent = ('f','f')):
"""
Continuous-time convolution of x1 and x2 with proper tracking of the output time axis.
    Approximate the convolution integral for the convolution of two continuous-time signals using the SciPy function signal.convolve. The time axes are managed from input to output. y(t) = x1(t)*x2(t).
Parameters
----------
x1 : ndarray of signal x1 corresponding to tx1
tx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to tx2
tx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ty : ndarray of the corresponding time axis for y
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The time steps used in
x1(t) and x2(t) must match. The default extents of ('f','f') are used for signals
that are active (have support) on or within t1 and t2 respectively. A right-sided
signal such as exp(-a*t)*u(t) is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> tx = arange(-5,10,.01)
>>> x = rect(tx-2,4) # pulse starts at t = 0
>>> y,ty = conv_integral(x,tx,x,tx)
>>> plot(ty,y) # expect a triangle on [0,8]
>>> # Consider a pulse convolved with an exponential ('r' type extent)
>>> h = 4*exp(-4*tx)*step(tx)
>>> y,ty = conv_integral(x,tx,h,tx,extent=('f','r')) # note extents set
>>> plot(ty,y) # expect a pulse charge and discharge waveform
"""
dt = tx1[1] - tx1[0]
nx1 = np.arange(0,len(tx1))
nx2 = np.arange(0,len(tx2))
n1 = nx1[0]
n2 = nx1[-1]
n3 = nx2[0]
n4 = nx2[-1]
    # Start by finding the valid output support or extent interval to ensure that
    # for non-finite extent signals ambiguous results are not returned.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n4+1-1)
ty = np.arange(0,len(x1)+len(x2)-1)*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
ny = np.arange(n1+n3,n1+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n3+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
ny = np.arange(n2+n3,n2+1+n4+1-1)
ty = ny*dt + tx1[-1]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
ny = np.arange(n1+n4,n2+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
ny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
ny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ty = ny*dt + max(tx1[0]+tx2[-1],tx1[-1]+tx2[0])
else:
print('Invalid x1 x2 extents specified or valid extent not found!')
return 0,0
# Finally convolve the sampled sequences and scale by dt
y = signal.convolve(x1, x2)*dt
print('Output support: (%+2.2f, %+2.2f)' % (ty[0],ty[-1]))
return y[ny], ty
def delta_eps(t,eps):
"""
Rectangular pulse approximation to impulse function.
Parameters
----------
t : ndarray of time axis
eps : pulse width
Returns
-------
d : ndarray containing the impulse approximation
Examples
--------
>>> t = arange(-2,2,.001)
>>> d = delta_eps(t,.1)
>>> plot(t,d)
"""
d = np.zeros(len(t))
for k,tt in enumerate(t):
if abs(tt) <= eps/2.:
d[k] = 1/float(eps)
return d
def step(t):
"""
Approximation to step function signal u(t).
In this numerical version of u(t) the step turns on at t = 0.
Parameters
----------
t : ndarray of the time axis
Returns
-------
x : ndarray of the step function signal u(t)
Examples
--------
>>> t = arange(-1,5,.01)
>>> x = step(t)
>>> plot(t,x)
>>> # to turn on at t = 1 shift t
>>> x = step(t - 1.0)
>>> plot(t,x)
"""
x = np.zeros(len(t))
for k,tt in enumerate(t):
if tt >= 0:
x[k] = 1.0
return x
def rect(t,tau):
"""
Approximation to the rectangle pulse Pi(t/tau).
In this numerical version of Pi(t/tau) the pulse is active
over -tau/2 <= t <= tau/2.
Parameters
----------
t : ndarray of the time axis
tau : the pulse width
Returns
-------
x : ndarray of the signal Pi(t/tau)
Examples
--------
>>> t = arange(-1,5,.01)
>>> x = rect(t,1.0)
>>> plot(t,x)
>>> # to turn on at t = 1 shift t
>>> x = rect(t - 1.0,1.0)
>>> plot(t,x)
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/2.:
x[k] = 0
else:
x[k] = 1
return x
def tri(t,tau):
"""
Approximation to the triangle pulse Lambda(t/tau).
In this numerical version of Lambda(t/tau) the pulse is active
over -tau <= t <= tau.
Parameters
----------
t : ndarray of the time axis
tau : one half the triangle base width
Returns
-------
x : ndarray of the signal Lambda(t/tau)
Examples
--------
>>> t = arange(-1,5,.01)
>>> x = tri(t,1.0)
>>> plot(t,x)
>>> # to turn on at t = 1 shift t
>>> x = tri(t - 1.0,1.0)
>>> plot(t,x)
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/1.:
x[k] = 0
else:
x[k] = 1 - np.abs(tk)/tau
return x
def dimpulse(n):
"""
Discrete impulse function delta[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal delta[n]
Examples
--------
>>> n = arange(-5,5)
>>> x = dimpulse(n)
>>> stem(n,x)
>>> # shift the delta left by 2
>>> x = dimpulse(n+2)
>>> stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn == 0:
x[k] = 1.0
return x
def dstep(n):
"""
Discrete step function u[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal u[n]
Examples
--------
>>> n = arange(-5,5)
>>> x = dstep(n)
>>> stem(n,x)
    >>> # shift the step left by 2
>>> x = dstep(n+2)
>>> stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0:
x[k] = 1.0
return x
def drect(n,N):
"""
Discrete rectangle function of duration N samples.
The signal is active on the interval 0 <= n <= N-1. Also known
as the rectangular window function, which is available in
scipy.signal.
Parameters
----------
n : ndarray of the time axis
N : the pulse duration
Returns
-------
x : ndarray of the signal
Notes
-----
    The discrete rectangle turns on at n = 0, turns off after n = N-1, and
    has a duration of exactly N samples.
Examples
--------
>>> n = arange(-5,5)
    >>> x = drect(n,3)
    >>> stem(n,x)
    >>> # shift the pulse left by 2
    >>> x = drect(n+2,3)
>>> stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0 and nn < N:
x[k] = 1.0
return x
def rc_imp(Ns,alpha,M=6):
"""
A truncated raised cosine pulse used in digital communications.
    The pulse shaping factor 0 < alpha < 1 is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform.
Examples
--------
>>> # ten samples per symbol and alpha = 0.35
>>> b = rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> stem(n,b)
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n));
a = alpha;
Ns *= 1.0
for i in range(len(n)):
if (1 - 4*(a*n[i]/Ns)**2) == 0:
b[i] = np.pi/4*np.sinc(1/(2.*a))
else:
b[i] = np.sinc(n[i]/Ns)*np.cos(np.pi*a*n[i]/Ns)/(1 - 4*(a*n[i]/Ns)**2)
return b
def sqrt_rc_imp(Ns,alpha,M=6):
"""
A truncated square root raised cosine pulse used in digital communications.
    The pulse shaping factor 0 < alpha < 1 is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
    when forming a pulse shaped digital communications waveform. When the
    square root raised cosine (SRC) pulse is used to generate the Tx signal
    and is also used at the receiver as a matched filter (receiver FIR
    filter), the overall received pulse is raised cosine shaped, giving zero
    intersymbol interference and optimum suppression of any additive white
    noise present at the receiver input.
Examples
--------
>>> # ten samples per symbol and alpha = 0.35
>>> b = sqrt_rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> stem(n,b)
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n))
Ns *= 1.0
a = alpha
for i in range(len(n)):
        if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(float).eps/2:
b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a)))
else:
b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2))
b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a))
return b
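# Illustrative sketch (not part of the original module): a numerical check of
# the matched-filter relationship described in the sqrt_rc_imp notes. Convolving
# the SRC pulse with itself should reproduce, up to a scale factor and a small
# truncation error set by M, the raised cosine pulse returned by rc_imp.
# The helper name _demo_src_matched_filter is illustrative only.
def _demo_src_matched_filter(Ns=10, alpha=0.35, M=6):
    b_src = sqrt_rc_imp(Ns, alpha, M)
    b_rc = rc_imp(Ns, alpha, M)
    rc_est = np.convolve(b_src, b_src)
    # Keep the center 2*M*Ns+1 samples so the pulse supports line up.
    mid = len(rc_est) // 2
    rc_est = rc_est[mid - M*Ns: mid + M*Ns + 1]
    # Compare shapes after normalizing both pulses to unit peak.
    err = np.max(np.abs(rc_est/np.max(rc_est) - b_rc/np.max(b_rc)))
    return err  # expected to be small for moderate M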
def PN_gen(N_bits,m=5):
"""
Maximal length sequence signal generator.
    Generates a sequence of 0/1 bits of N_bits duration. The bits themselves
    are obtained by periodically extending an m-sequence of period 2**m - 1.
    Available m-sequences (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
N_bits : the number of bits to generate
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
PN : ndarray of the generator output over N_bits
Notes
-----
The sequence is periodic having period 2**m - 1 (2^m - 1).
Examples
--------
    >>> # A 15 bit period signal over 50 bits
>>> PN = PN_gen(50,4)
"""
c = m_seq(m)
Q = len(c)
max_periods = int(np.ceil(N_bits/float(Q)))
PN = np.zeros(max_periods*Q)
for k in range(max_periods):
PN[k*Q:(k+1)*Q] = c
PN = np.resize(PN, (1,N_bits))
return PN.flatten()
def m_seq(m):
"""
Generate an m-sequence ndarray using an all-ones initialization.
Available m-sequence (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
c : ndarray of one period of the m-sequence
Notes
-----
The sequence period is 2**m - 1 (2^m - 1).
Examples
--------
>>> c = m_seq(5)
"""
# Load shift register with all ones to start
sr = np.ones(m)
    # M-sequence length is:
Q = 2**m - 1
c = np.zeros(Q)
if m == 2:
taps = np.array([1, 1, 1])
elif m == 3:
taps = np.array([1, 0, 1, 1])
elif m == 4:
taps = np.array([1, 0, 0, 1, 1])
elif m == 5:
taps = np.array([1, 0, 0, 1, 0, 1])
elif m == 6:
taps = np.array([1, 0, 0, 0, 0, 1, 1])
elif m == 7:
taps = np.array([1, 0, 0, 0, 1, 0, 0, 1])
elif m == 8:
taps = np.array([1, 0, 0, 0, 1, 1, 1, 0, 1])
elif m == 9:
taps = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 1])
elif m == 10:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1])
elif m == 11:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1])
elif m == 12:
taps = np.array([1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1])
elif m == 16:
taps = np.array([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1])
else:
print('Invalid length specified')
for n in range(Q):
tap_xor = 0
c[n] = sr[-1]
for k in range(1,m):
if taps[k] == 1:
tap_xor = np.bitwise_xor(tap_xor,np.bitwise_xor(int(sr[-1]),int(sr[m-1-k])))
sr[1:] = sr[:-1]
sr[0] = tap_xor
return c
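# Illustrative sketch (not part of the original module): quick check of the
# m-sequence properties stated in the m_seq docstring. One period has length
# 2**m - 1 by construction, and the balance property of a maximal-length
# sequence implies it contains exactly 2**(m-1) ones.
# The helper name _demo_m_seq_properties is illustrative only.
def _demo_m_seq_properties(m=5):
    c = m_seq(m)
    period = len(c)        # 2**m - 1, e.g. 31 for m = 5
    ones = int(np.sum(c))  # 2**(m-1), e.g. 16 for m = 5
    return period, ones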
def BPSK_tx(N_bits,Ns,ach_fc=2.0,ach_lvl_dB=-100,pulse='rect',alpha = 0.25,M=6):
"""
    Generates a biphase shift keyed (BPSK) transmit signal with adjacent channel interference.
    Generates three BPSK signals with rectangular or square root raised cosine (SRC)
    pulse shaping of duration N_bits and Ns samples per bit. The desired signal is
    centered on f = 0, while the adjacent channel signals to the left and right
    are generated at a dB level relative to the desired signal. Used in the
digital communications Case Study supplement.
Parameters
----------
N_bits : the number of bits to simulate
Ns : the number of samples per bit
ach_fc : the frequency offset of the adjacent channel signals (default 2.0)
ach_lvl_dB : the level of the adjacent channel signals in dB (default -100)
    pulse : the pulse shape 'rect' or 'src'
alpha : square root raised cosine pulse shape factor (default = 0.25)
M : square root raised cosine pulse truncation factor (default = 6)
Returns
-------
x : ndarray of the composite signal x0 + ach_lvl*(x1p + x1m)
b : the transmit pulse shape
data0 : the data bits used to form the desired signal; used for error checking
Notes
-----
Examples
--------
>>> x,b,data0 = BPSK_tx(1000,10,'src')
"""
x0,b,data0 = NRZ_bits(N_bits,Ns,pulse,alpha,M)
x1p,b,data1p = NRZ_bits(N_bits,Ns,pulse,alpha,M)
x1m,b,data1m = NRZ_bits(N_bits,Ns,pulse,alpha,M)
n = np.arange(len(x0))
x1p = x1p*np.exp(1j*2*np.pi*ach_fc/float(Ns)*n)
x1m = x1m*np.exp(-1j*2*np.pi*ach_fc/float(Ns)*n)
ach_lvl = 10**(ach_lvl_dB/20.)
return x0 + ach_lvl*(x1p + x1m), b, data0
#def BPSK_rx(r,b,):
def NRZ_bits(N_bits,Ns,pulse='rect',alpha = 0.25,M=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping.
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping.
Parameters
----------
N_bits : number of NRZ +/-1 data bits to produce
Ns : the number of samples per bit,
pulse_type : 'rect' , 'rc', 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
M : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
data : ndarray of the underlying data bits
Notes
-----
    Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
    'src' (root raised cosine). For 'rc' and 'src' the actual pulse length
    is 2*M*Ns+1 samples. This function is used by BPSK_tx in the Case Study article.
Examples
--------
>>> x,b,data = NRZ_bits(100,10)
>>> t = arange(len(x))
>>> plot(t,x)
"""
data = np.random.randint(0,2,N_bits)
x = np.hstack((2*data.reshape(N_bits,1)-1,np.zeros((N_bits,Ns-1))))
x =x.flatten()
if pulse.lower() == 'rect':
b = np.ones(Ns)
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,M)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,M)
else:
        print('pulse type must be rect, rc, or src')
x = signal.lfilter(b,1,x)
return x,b/float(Ns),data
def NRZ_bits2(data,Ns,pulse='rect',alpha = 0.25,M=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping with user data
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping. The data sequence is user supplied.
Parameters
----------
data : ndarray of the data bits as 0/1 values
Ns : the number of samples per bit,
pulse_type : 'rect' , 'rc', 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
M : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
    'src' (root raised cosine). For 'rc' and 'src' the actual pulse length is 2*M*Ns+1 samples.
Examples
--------
    >>> x,b = NRZ_bits2(m_seq(5),10)
>>> t = arange(len(x))
>>> plot(t,x)
"""
N_bits = len(data)
x = np.hstack((2*data.reshape(N_bits,1)-1,np.zeros((N_bits,Ns-1))))
x = x.flatten()
if pulse.lower() == 'rect':
b = np.ones(Ns)
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,M)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,M)
else:
        print('pulse type must be rect, rc, or src')
x = signal.lfilter(b,1,x)
return x,b/float(Ns)
def eye_plot(x,L,S=0):
"""
Eye pattern plot of a baseband digital communications waveform.
The signal must be real, but can be multivalued in terms of the underlying
modulation scheme. Used for BPSK eye plots in the Case Study article.
Parameters
----------
x : ndarray of the real input data vector/array
L : display length in samples (usually two symbols)
S : start index
Returns
-------
Nothing : A plot window opens containing the eye plot
Notes
-----
Increase S to eliminate filter transients.
Examples
--------
>>> # 1000 bits at 10 samples per bit with 'rc' shaping
>>> x,b, data = NRZ_bits(1000,10,'rc')
>>> eye_plot(x,20,60)
"""
plt.figure(figsize=(6,4))
idx = np.arange(0,L+1)
plt.plot(idx,x[S:S+L+1],'b')
k_max = int((len(x) - S)/L)-1
for k in range(1,k_max):
plt.plot(idx,x[S+k*L:S+L+1+k*L],'b')
plt.grid()
plt.xlabel('Time Index - n')
plt.ylabel('Amplitude')
plt.title('Eye Plot')
return 0
def scatter(x,Ns,start):
"""
Sample a baseband digital communications waveform at the symbol spacing.
Parameters
----------
x : ndarray of the input digital comm signal
Ns : number of samples per symbol (bit)
start : the array index to start the sampling
Returns
-------
xI : ndarray of the real part of x following sampling
xQ : ndarray of the imaginary part of x following sampling
Notes
-----
Normally the signal is complex, so the scatter plot contains
    clusters of points in the complex plane. For a binary signal
such as BPSK, the point centers are nominally +/-1 on the real
axis. Start is used to eliminate transients from the FIR
pulse shaping filters from appearing in the scatter plot.
Examples
--------
>>> x,b, data = NRZ_bits(1000,10,'rc')
>>> # add some noise so points are now scattered about +/-1
>>> y = cpx_AWGN(x,20,10)
>>> yI,yQ = scatter(y,10,60)
>>> plot(yI,yQ,'.')
>>> axis('equal')
"""
xI = np.real(x[start::Ns])
xQ = np.imag(x[start::Ns])
return xI, xQ
def bit_errors(z,data,start,Ns):
"""
A simple bit error counting function.
    In its present form this function counts bit errors by comparing
    hard decision BPSK bits in +/-1 form with the 0/1 binary data
    that was transmitted. Timing between the Tx
and Rx data is the responsibility of the user. An enhanced
version of this function, which features automatic synching
will be created in the future.
Parameters
----------
z : ndarray of hard decision BPSK data prior to symbol spaced sampling
data : ndarray of reference bits in 1/0 format
    start : timing reference for the received signal
Ns : the number of samples per symbol
Returns
-------
Pe_hat : the estimated probability of a bit error
Notes
-----
    The Tx and Rx data streams are exclusive-or'd and then the bit errors
are summed, and finally divided by the number of bits observed to form an
estimate of the bit error probability. This function needs to be
enhanced to be more useful.
Examples
--------
>>> from scipy import signal
>>> x,b, data = NRZ_bits(1000,10)
>>> # set Eb/N0 to 8 dB
>>> y = cpx_AWGN(x,8,10)
>>> # matched filter the signal
>>> z = signal.lfilter(b,1,y)
>>> # make bit decisions at 10 and Ns multiples thereafter
>>> Pe_hat = bit_errors(z,data,10,10)
"""
Pe_hat = np.sum(data[0:len(z[start::Ns])]^np.int64((np.sign(np.real(z[start::Ns]))+1)/2))/float(len(z[start::Ns]))
return Pe_hat
def cpx_AWGN(x,EsN0,Ns):
"""
Apply white Gaussian noise to a digital communications signal.
This function represents a complex baseband white Gaussian noise
digital communications channel. The input signal array may be real
or complex.
Parameters
----------
x : ndarray noise free complex baseband input signal.
    EsN0 : set the channel Es/N0 (Eb/N0 for binary) level in dB
Ns : number of samples per symbol (bit)
Returns
-------
y : ndarray x with additive noise added.
Notes
-----
Set the channel energy per symbol-to-noise power spectral
density ratio (Es/N0) in dB.
Examples
--------
>>> x,b, data = NRZ_bits(1000,10)
>>> # set Eb/N0 = 10 dB
>>> y = cpx_AWGN(x,10,10)
"""
w = np.sqrt(Ns*np.var(x)*10**(-EsN0/10.)/2.)*(np.random.randn(len(x)) + 1j*np.random.randn(len(x)))
return x+w
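# Illustrative sketch (not part of the original module): verify the Es/N0
# scaling used by cpx_AWGN. The added noise has variance Ns*var(x)*10**(-EsN0/10),
# so measuring 10*log10(Ns*var(x)/var(w)) should return approximately the
# requested Es/N0 in dB. The helper name _demo_cpx_AWGN_level is illustrative only.
def _demo_cpx_AWGN_level(EsN0=10, Ns=10, N_bits=10000):
    x, b, data = NRZ_bits(N_bits, Ns)
    y = cpx_AWGN(x, EsN0, Ns)
    w = y - x  # isolate the added complex noise
    EsN0_meas = 10*np.log10(Ns*np.var(x)/np.var(w))
    return EsN0_meas  # approximately EsN0, within statistical error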
def my_psd(x,NFFT=2**10,Fs=1):
"""
    A local version of matplotlib's psd function that returns the plot arrays.
A mlab.psd wrapper function that returns two ndarrays;
makes no attempt to auto plot anything.
Parameters
----------
x : ndarray input signal
NFFT : a power of two, e.g., 2**10 = 1024
Fs : the sampling rate in Hz
Returns
-------
Px : ndarray of the power spectrum estimate
f : ndarray of frequency values
Notes
-----
This function makes it easier to overlay spectrum plots because
you have better control over the axis scaling than when using psd()
in the autoscale mode.
Examples
--------
>>> x,b, data = NRZ_bits(10000,10)
>>> Px,f = my_psd(x,2**10,10)
>>> plot(f, 10*log10(Px))
"""
Px,f = pylab.mlab.psd(x,NFFT,Fs)
return Px.flatten(), f
def am_tx(m,a_mod,fc=75e3):
"""
AM transmitter for Case Study of Chapter 17.
Assume input is sampled at 8 Ksps and upsampling
by 24 is performed to arrive at fs_out = 192 Ksps.
Parameters
----------
m : ndarray of the input message signal
a_mod : AM modulation index, between 0 and 1
fc : the carrier frequency in Hz
Returns
-------
x192 : ndarray of the upsampled by 24 and modulated carrier
t192 : ndarray of the upsampled by 24 time axis
m24 : ndarray of the upsampled by 24 message signal
Notes
-----
The sampling rate of the input signal is assumed to be 8 kHz.
Examples
--------
>>> n = arange(0,1000)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
"""
m24 = interp24(m)
t192 = np.arange(len(m24))/192.0e3
#m24 = np.cos(2*np.pi*2.0e3*t192)
m_max = np.max(np.abs(m24))
x192 = (1 + a_mod*m24/m_max)*np.cos(2*np.pi*fc*t192)
return x192, t192, m24
def am_rx(x192):
"""
AM envelope detector receiver for the Chapter 17 Case Study
The receiver bandpass filter is not included in this function.
Parameters
----------
x192 : ndarray of the AM signal at sampling rate 192 ksps
Returns
-------
m_rx8 : ndarray of the demodulated message at 8 ksps
t8 : ndarray of the time axis at 8 ksps
m_rx192 : ndarray of the demodulated output at 192 ksps
x_edet192 : ndarray of the envelope detector output at 192 ksps
Notes
-----
The bandpass filter needed at the receiver front-end can be designed
using b_bpf,a_bpf = am_rx_BPF().
Examples
--------
>>> n = arange(0,1000)
>>> # 1 kHz message signal
    >>> m = cos(2*pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
    >>> m_rx8,t8,m_rx192,x_edet192 = am_rx(x192)
"""
x_edet192 = env_det(x192)
m_rx8 = deci24(x_edet192)
# remove DC offset from the env_det + LPF output
m_rx8 -= np.mean(m_rx8)
t8 = np.arange(len(m_rx8))/8.0e3
"""
For performance testing also filter x_env_det
192e3 using a Butterworth cascade.
The filter cutoff is 5kHz, the message BW.
"""
b192,a192 = signal.butter(5,2*5.0e3/192.0e3)
m_rx192 = signal.lfilter(b192,a192,x_edet192)
m_rx192 = signal.lfilter(b192,a192,m_rx192)
m_rx192 -= np.mean(m_rx192)
return m_rx8,t8,m_rx192,x_edet192
def am_rx_BPF(N_order = 7, ripple_dB = 1, B = 10e3, fs = 192e3):
"""
Bandpass filter design for the AM receiver Case Study of Chapter 17.
Design a 7th-order Chebyshev type 1 bandpass filter to remove/reduce
    adjacent channel interference at the envelope detector input.
Parameters
----------
N_order : the filter order (default = 7)
ripple_dB : the passband ripple in dB (default = 1)
B : the RF bandwidth (default = 10e3)
fs : the sampling frequency
Returns
-------
b_bpf : ndarray of the numerator filter coefficients
a_bpf : ndarray of the denominator filter coefficients
Examples
--------
>>> from scipy import signal
>>> # Use the default values
>>> b_bpf,a_bpf = am_rx_BPF()
>>> # plot the filter pole-zero plot
>>> zplane(b_bpf,a_bpf)
>>> # plot the frequency response
>>> f = arange(0,192/2.,.1)
>>> w, Hbpf = signal.freqz(b_bpf,a_bpf,2*pi*f/192)
>>> plot(f,20*log10(abs(Hbpf)))
>>> axis([0,192/2.,-80,10])
"""
b_bpf,a_bpf = signal.cheby1(N_order,ripple_dB,2*np.array([75e3-B/2.,75e3+B/2.])/fs,'bandpass')
return b_bpf,a_bpf
def env_det(x):
"""
Ideal envelope detector.
This function retains the positive half cycles of the input signal.
Parameters
----------
    x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Examples
--------
>>> n = arange(0,100)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
>>> y = env_det(x192)
"""
y = np.zeros(len(x))
for k,xx in enumerate(x):
if xx >= 0:
y[k] = xx
return y
def interp24(x):
"""
Interpolate by L = 24 using Butterworth filters.
The interpolation is done using three stages. Upsample by
L = 2 and lowpass filter, upsample by 3 and lowpass filter, then
upsample by L = 4 and lowpass filter. In all cases the lowpass
filter is a 10th-order Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
track the upsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = interp24(x)
"""
# Stage 1: L = 2
b2,a2 = signal.butter(10,1/2.)
y1 = upsample(x,2)
y1 = signal.lfilter(b2,a2,2*y1)
# Stage 2: L = 3
b3,a3 = signal.butter(10,1/3.)
y2 = upsample(y1,3)
y2 = signal.lfilter(b3,a3,3*y2)
# Stage 3: L = 4
b4,a4 = signal.butter(10,1/4.)
y3 = upsample(y2,4)
y3 = signal.lfilter(b4,a4,4*y3)
return y3
def deci24(x):
"""
Decimate by L = 24 using Butterworth filters.
    The decimation is done using three stages. Lowpass filter and downsample
    by M = 2, lowpass filter and downsample by 3, then lowpass filter and
    downsample by M = 4. In all cases the lowpass
    filter is a 10th-order Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
    track the downsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = deci24(x)
"""
# Stage 1: M = 2
b2,a2 = signal.butter(10,1/2.)
y1 = signal.lfilter(b2,a2,x)
y1 = downsample(y1,2)
# Stage 2: M = 3
b3,a3 = signal.butter(10,1/3.)
y2 = signal.lfilter(b3,a3,y1)
y2 = downsample(y2,3)
    # Stage 3: M = 4
b4,a4 = signal.butter(10,1/4.)
y3 = signal.lfilter(b4,a4,y2)
y3 = downsample(y3,4)
return y3
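# Illustrative sketch (not part of the original module): the multirate pair
# interp24/deci24 should approximately return a bandlimited input after an
# interpolate-by-24 then decimate-by-24 round trip, apart from the delay and
# start-up transients of the cascaded Butterworth filters.
# The helper name _demo_interp24_deci24 is illustrative only.
def _demo_interp24_deci24():
    n = np.arange(0, 400)
    x = np.cos(2*np.pi*0.02*n)   # well inside all of the filter passbands
    y = deci24(interp24(x))      # same length and rate as x, delayed/filtered
    return x, y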
def upsample(x,L):
"""
Upsample by factor L
Insert L - 1 zero samples in between each input sample.
Parameters
----------
x : ndarray of input signal values
L : upsample factor
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = upsample(x,3)
"""
N_input = len(x)
y = np.hstack((x.reshape(N_input,1),np.zeros((N_input,L-1))))
y = y.flatten()
return y
def downsample(x,M,p=0):
"""
Downsample by factor M
Keep every Mth sample of the input. The phase of the input samples
kept can be selected.
Parameters
----------
x : ndarray of input signal values
    M : downsample factor
p : phase of decimated value, 0 (default), 1, ..., M-1
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = downsample(x,3)
>>> y = downsample(x,3,1)
"""
x = x[0:int(np.floor(len(x)/M))*M]
x = x.reshape((int(np.floor(len(x)/M)),M))
y = x[:,p]
return y
def unique_cpx_roots(rlist,tol = 0.001):
"""
The average of the root values is used when multiplicity
is greater than one.
Mark Wickert October 2016
"""
uniq = [rlist[0]]
mult = [1]
for k in range(1,len(rlist)):
N_uniq = len(uniq)
for m in range(N_uniq):
if abs(rlist[k]-uniq[m]) <= tol:
mult[m] += 1
uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m])
break
uniq = np.hstack((uniq,rlist[k]))
mult = np.hstack((mult,[1]))
return np.array(uniq), np.array(mult)
def zplane(b,a,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
    Create a z-plane pole-zero plot.
    Create a z-plane pole-zero plot using the numerator
and denominator z-domain system function coefficient
ndarrays b and a respectively. Assume descending powers of z.
Parameters
----------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
auto_scale : bool (default True)
size : plot radius maximum when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
    is set at 1e-3 via the tol keyword and the helper function unique_cpx_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> zplane(b,a)
>>> # Here the plot is generated using manual scaling
>>> zplane(b,a,False,1.5)
"""
M = len(b) - 1
N = len(a) - 1
# Plot labels if multiplicity greater than 1
x_scale = 1.5*size
y_scale = 1.5*size
x_off = 0.02
y_off = 0.01
#N_roots = np.array([1.0])
if M > 0:
N_roots = np.roots(b)
#D_roots = np.array([1.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
if M > 0 and N > 0:
size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1
elif M > 0:
size = max(np.max(np.abs(N_roots)),1.0)+.1
elif N > 0:
size = max(1.0,np.max(np.abs(D_roots)))+.1
else:
size = 1.1
plt.figure(figsize=(5,5))
plt.axis('equal')
r = np.linspace(0,2*np.pi,200)
plt.plot(np.cos(r),np.sin(r),'r--')
plt.plot([-size,size],[0,0],'k-.')
plt.plot([0,0],[-size,size],'k-.')
if M > 0:
if detect_mult == True:
N_uniq, N_mult = unique_cpx_roots(N_roots,tol=tol)
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
            idx_N_mult = np.nonzero(N_mult > 1)[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
else:
plt.plot(np.real(N_roots),np.imag(N_roots),'ko',mfc='None',ms=8)
if N > 0:
if detect_mult == True:
D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol)
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
            idx_D_mult = np.nonzero(D_mult > 1)[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
else:
plt.plot(np.real(D_roots),np.imag(D_roots),'kx',ms=8)
if M - N < 0:
plt.plot(0.0,0.0,'bo',mfc='None',ms=8)
elif M - N > 0:
plt.plot(0.0,0.0,'kx',ms=8)
if abs(M - N) > 1:
plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis([-size,size,-size,size])
return M,N
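# Illustrative sketch (not part of the original module): exercising the
# multiplicity annotation described in the zplane notes with a double pole,
# e.g. H(z) = 1/(1 - 0.8*z**-1)**2. The pole near z = 0.8 should be labeled
# with its multiplicity. The helper name _demo_zplane_double_pole is
# illustrative only; calling it opens a plot window.
def _demo_zplane_double_pole():
    b = np.array([1.0])
    a = np.convolve([1, -0.8], [1, -0.8])  # (1 - 0.8*z**-1)**2
    return zplane(b, a)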
def rect_conv(n,N_len):
"""
The theoretical result of convolving two rectangle sequences.
The result is a triangle. The solution is
based on pure analysis. Simply coded as opposed
to efficiently coded.
Parameters
----------
n : ndarray of time axis
N_len : rectangle pulse duration
Returns
-------
    y : ndarray of the output signal
Examples
--------
>>> n = arange(-5,20)
>>> y = rect_conv(n,6)
"""
y = np.zeros(len(n))
for k in range(len(n)):
if n[k] >= 0 and n[k] < N_len-1:
y[k] = n[k] + 1
elif n[k] >= N_len-1 and n[k] <= 2*N_len-2:
y[k] = 2*N_len-1-n[k]
return y
def biquad2(w_num, r_num, w_den, r_den):
"""
A biquadratic filter in terms of conjugate pole and zero pairs.
Parameters
----------
w_num : zero frequency (angle) in rad/sample
r_num : conjugate zeros radius
w_den : pole frequency (angle) in rad/sample
r_den : conjugate poles radius; less than 1 for stability
Returns
-------
b : ndarray of numerator coefficients
a : ndarray of denominator coefficients
Examples
--------
    >>> b,a = biquad2(pi/4., 1, pi/4., 0.95)
"""
b = np.array([1, -2*r_num*np.cos(w_num), r_num**2])
a = np.array([1, -2*r_den*np.cos(w_den), r_den**2])
return b, a
def plot_na(x,y,mode='stem'):
pylab.figure(figsize=(5,2))
frame1 = pylab.gca()
if mode.lower() == 'stem':
pylab.stem(x,y)
else:
pylab.plot(x,y)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
pylab.show()
def from_wav(filename):
"""
Read a wave file.
A wrapper function for scipy.io.wavfile.read
that also includes int16 to float [-1,1] scaling.
Parameters
----------
filename : file name string
Returns
-------
fs : sampling frequency in Hz
x : ndarray of normalized to 1 signal samples
Examples
--------
>>> fs,x = from_wav('test_file.wav')
"""
fs, x = wavfile.read(filename)
return fs, x/32767.
def to_wav(filename,rate,x):
"""
Write a wave file.
A wrapper function for scipy.io.wavfile.write
that also includes int16 scaling and conversion.
Assume input x is [-1,1] values.
Parameters
----------
filename : file name string
    rate : sampling frequency in Hz
    x : ndarray of signal samples on [-1,1]
Returns
-------
Nothing : writes only the *.wav file
Examples
--------
>>> to_wav('test_file.wav', 8000, x)
"""
x16 = np.int16(x*32767)
wavfile.write(filename, rate, x16)
if __name__ == '__main__':
b = CIC(10,1)
print(b)
"""
x = np.random.randn(10)
print(x)
b = signal.remez(16,[0,.1,.2,.5], [1,0], [1,1], 1)
w,H = signal.freqz(b,[1],512)
plot(w,20*log10(abs(H)))
figure(figsize=(6,4))
#plot(arange(0,len(b)),b)
y = signal.lfilter(b, [1], x,)
print(y)
zplane([1,1,1,1,1],[1,-.8],1.25)
"""
| bsd-2-clause |
jay3sh/vispy | vispy/visuals/isocurve.py | 18 | 7809 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .line import LineVisual
from ..color import ColorArray
from ..color.colormap import _normalize, get_colormap
from ..geometry.isocurve import isocurve
from ..testing import has_matplotlib
# checking for matplotlib
_HAS_MPL = has_matplotlib()
if _HAS_MPL:
from matplotlib import _cntr as cntr
class IsocurveVisual(LineVisual):
"""Displays an isocurve of a 2D scalar array.
Parameters
----------
data : ndarray | None
2D scalar array.
levels : ndarray, shape (Nlev,) | None
        The levels at which the isocurve is constructed from *data*.
color_lev : Color, colormap name, tuple, list or array
The color to use when drawing the line. If a list is given, it
must be of shape (Nlev), if an array is given, it must be of
        shape (Nlev, ...), and provide one color per level (rgba, colorname).
clim : tuple
(min, max) limits to apply when mapping level values through a
colormap.
**kwargs : dict
Keyword arguments to pass to `LineVisual`.
Notes
-----
"""
def __init__(self, data=None, levels=None, color_lev=None, clim=None,
**kwargs):
self._data = None
self._levels = levels
self._color_lev = color_lev
self._clim = clim
self._need_color_update = True
self._need_level_update = True
self._need_recompute = True
self._X = None
self._Y = None
self._iso = None
self._level_min = None
self._data_is_uniform = False
self._lc = None
self._cl = None
self._li = None
self._connect = None
self._verts = None
kwargs['method'] = 'gl'
kwargs['antialias'] = False
LineVisual.__init__(self, **kwargs)
if data is not None:
self.set_data(data)
@property
def levels(self):
""" The threshold at which the isocurve is constructed from the
2D data.
"""
return self._levels
@levels.setter
def levels(self, levels):
self._levels = levels
self._need_level_update = True
self._need_recompute = True
self.update()
@property
def color(self):
return self._color_lev
@color.setter
def color(self, color):
self._color_lev = color
self._need_level_update = True
self._need_color_update = True
self.update()
def set_data(self, data):
""" Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
"""
self._data = data
# if using matplotlib isoline algorithm we have to check for meshgrid
# and we can setup the tracer object here
if _HAS_MPL:
if self._X is None or self._X.T.shape != data.shape:
self._X, self._Y = np.meshgrid(np.arange(data.shape[0]),
np.arange(data.shape[1]))
self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
if self._clim is None:
self._clim = (data.min(), data.max())
# sanity check,
# should we raise an error here, since no isolines can be drawn?
# for now, _prepare_draw returns False if no isoline can be drawn
if self._data.min() != self._data.max():
self._data_is_uniform = False
else:
self._data_is_uniform = True
self._need_recompute = True
self.update()
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
"""
verts = np.vstack(paths)
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
# calculate which level are within data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
choice = np.nonzero((self.levels > self._data.min()) &
(self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts)
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) +
colors[i+self._level_min])
self._cl = np.vstack(level_color)
def _levels_to_colors(self):
# computes ColorArrays for given levels
# try _color_lev as colormap, except as everything else
try:
f_color_levs = get_colormap(self._color_lev)
except:
colors = ColorArray(self._color_lev).rgba
else:
lev = _normalize(self._levels, self._clim[0], self._clim[1])
# map function expects (Nlev,1)!
colors = f_color_levs.map(lev[:, np.newaxis])
# broadcast to (nlev, 4) array
if len(colors) == 1:
colors = colors * np.ones((len(self._levels), 1))
# detect color_lev/levels mismatch and raise error
if (len(colors) != len(self._levels)):
raise TypeError("Color/level mismatch. Color must be of shape "
"(Nlev, ...) and provide one color per level")
self._lc = colors
def _prepare_draw(self, view):
if (self._data is None or self._levels is None or
self._color_lev is None or self._data_is_uniform):
return False
if self._need_level_update:
self._levels_to_colors()
self._need_level_update = False
if self._need_recompute:
self._compute_iso_line()
self._compute_iso_color()
LineVisual.set_data(self, pos=self._verts, connect=self._connect,
color=self._cl)
self._need_recompute = False
if self._need_color_update:
self._compute_iso_color()
LineVisual.set_data(self, color=self._cl)
self._need_color_update = False
return LineVisual._prepare_draw(self, view)
| bsd-3-clause |
WilliamYi96/Machine-Learning | Deep-Learning-Specialization/Convolutional-Neural-Networks/CNN-Applications.py | 1 | 1996 | import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
np.random.seed(1)
# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Example of a picture
index = 5
plt.imshow(X_train_orig[index])
# print(Y_train_orig.shape, np.squeeze(Y_train_orig).shape)
print('y = ' + str(np.squeeze(Y_train_orig)[index]))
# print('y = ' + str(np.squeeze(Y_train_orig[:,index])))
plt.show()
X_train = X_train_orig / 255.
X_test = X_test_orig / 255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
def create_placeholders(n_H0, n_W0, n_C0, n_y):
'''
Creates the placeholders for the tensorflow session.
Arguments:
n_H0 -- scalar, height of an input image
n_W0 -- scalar, width of an input image
n_C0 -- scalar, number of channels of the input
n_y -- scalar, number of classes
Returns:
X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype 'float'
Y -- placeholder for the input labels, of shape [None, n_y] and dtype 'float'
'''
X = tf.placeholder(shape=[None, n_H0, n_W0, n_C0], dtype=tf.float32)
Y = tf.placeholder(shape=[None, n_y], dtype=tf.float32)
return X, Y
X, Y = create_placeholders(64, 64, 3, 6)
print('X = {}'.format(X))
print('Y = {}'.format(Y))
# Continue from Initialize parameters.
# https://walzqyuibvvdjprisbmedy.coursera-apps.org/notebooks/week1/Convolution_model_Application_v1a.ipynb#1.2---Initialize-parameters | apache-2.0 |
jairot/meliscore | meliscore/front/dataset.py | 1 | 4373 | import pandas as pd
import requests
import json
import collections
from datetime import datetime
from queries import *
import numpy as np
from pandas import DataFrame
import os
URL_BASE = "https://api.mercadolibre.com/"
def get_selling_speeds(itemids):
"""
    Given a list of itemids it calculates
    the average number of items sold per day since
    the beginning of the sale
"""
data = get_items(itemids, ["id","start_time","sold_quantity", "price"])
data = pd.read_json(json.dumps(data))
data['elapsed_time'] = datetime.now() - data.start_time
# data['elapsed_hours'] = data.elapsed_time / np.timedelta64(1,'h')
data['elapsed_days'] = data.elapsed_time / np.timedelta64(1,'D')
data['speed'] = data.sold_quantity / data.elapsed_days
return data[['price', 'speed']]
def simplify_item(item, prefix, sep):
"""
Given an item result from the API
it removes all nested information and returns
a plain json that is more dataframe-friendly
"""
items = []
for k, v in item.items():
new_key = prefix + sep + k if prefix else k
if isinstance(v, collections.MutableMapping):
items.extend(simplify_item(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
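# Illustrative helper (not used by the pipeline): shows how simplify_item
# flattens the nested dictionaries returned by the API into a single level,
# joining keys with the given separator. The data below is made up.
def _simplify_item_example():
    nested = {'id': 1, 'seller': {'id': 7, 'power': 'gold'}}
    flat = simplify_item(nested, '', '_')
    # flat == {'id': 1, 'seller_id': 7, 'seller_power': 'gold'}
    return flat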
def price_quantiles(df):
if 'price' in df.columns:
prices = df['price']
first, second, third = prices.quantile([.25, .5, .75])
q = {'first quantile': first,
'second quantile': second,
'third quantile': third}
return q
else:
raise NameError('price column does not exist')
def find_seller_score(users):
scores = []
for user in users:
seller_score = user["seller_reputation"]["power_seller_status"]
scores = scores + [seller_score]
return pd.Series(scores)
def find_imgcount(items):
imgcount = []
for item in items:
item_id = item['id']
n_imgs = get_imgcount(item_id)
imgcount = imgcount + [n_imgs]
return pd.Series(imgcount)
def find_item_score(items):
scores = []
for item in items:
item_score = item["listing_type_id"]
scores = scores + [item_score]
return pd.Series(scores)
def create_dataset(item, reduced=False, extra_features=False):
category_id = item.get('category_id')
condition = item.get('condition')
fname = '%s_%s_%s.csv' % (category_id, condition, 'red' if reduced else 'full')
    # TODO: watch out for the False!!!!
if os.path.exists(fname) and False:
df = pd.read_csv(fname, encoding='utf-8')
else:
response = requests.get(URL_BASE + 'sites/MLA/search?category={}&condition={}'.format(category_id, condition))
data = response.json()
limit = data['paging']['limit']
offset = 0
items_number = min(data['paging']['total'], 500)
while offset < items_number:
print offset
response = requests.get(URL_BASE + 'sites/MLA/search?category=' + category_id + '&offset=' + str(offset))
data = response.json()
items = [simplify_item(i, '', '_') for i in data['results']]
page_df = pd.read_json(json.dumps(items))
if offset == 0:
df = page_df
else:
df = df.append(page_df)
offset += limit
if reduced:
# reduce dataFrame to items with stock
# (from which we can calculate a selling price)
df = df[(df.available_quantity > 5) | (df.id == item['id'])]
df_speeds = get_selling_speeds(list(df.id))
df['speed'] = df_speeds.speed
if extra_features:
items = get_items(list(df['id']), ['id',"listing_type_id"])
users = get_users(list(df['id']), ['seller_reputation'])
df['seller_score'] = find_seller_score(users)
df['item_score'] = find_item_score(items)
df['n_images'] = find_imgcount(items)
df.to_csv(fname, encoding='utf-8')
return df
def create_dataset_from_item(item):
"""
Create the dataset from an item dict.
:param item: the item dict.
:return:
"""
create_dataset(item.get('category_id'))
if __name__ == '__main__':
# iPhone 5 16gb
category_id = 'MLA121408'
create_dataset(category_id)
| bsd-3-clause |
pythonvietnam/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
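# Illustrative extension (not part of the original example): as noted in the
# module docstring, a higher-purity subset of class B can be obtained by
# keeping only samples whose decision score exceeds a threshold. The threshold
# value of 0.0 below is an arbitrary choice for demonstration.
threshold = 0.0
selected = twoclass_output > threshold
if selected.any():
    purity_B = np.mean(y[selected] == 1)
    print("Fraction of true class B samples above threshold %.1f: %.3f"
          % (threshold, purity_B))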
| bsd-3-clause |
liikGit/MissionPlanner | Lib/site-packages/scipy/signal/fir_filter_design.py | 53 | 18572 | """Functions for FIR filter design."""
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
import sigtools
# Some notes on function parameters:
#
# `cutoff` and `width` are given as a numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21)**0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
    numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and stopband
(or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
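# Worked example (illustrative, not part of the public API): for a 101-tap
# filter with a transition width of 0.05 (relative to the Nyquist rate),
# Kaiser's formula gives 2.285*(101 - 1)*pi*0.05 + 7.95, i.e. roughly 43.8 dB.
def _kaiser_atten_example():
    return kaiser_atten(101, 0.05)  # ~= 43.84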
def kaiserord(ripple, width):
"""Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
        Positive number specifying maximum ripple in passband (dB) and minimum
        stopband attenuation (dB).
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
    beta : float
The beta parameter for the kaiser window.
Notes
-----
There are several ways to obtain the Kaiser window:
signal.kaiser(numtaps, beta, sym=0)
signal.get_window(beta, numtaps)
signal.get_window(('kaiser', beta), numtaps)
The empirical equations discovered by Kaiser are used.
See Also
--------
kaiser_beta, kaiser_atten
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
        raise ValueError("Requested maximum ripple attenuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
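# Illustrative design sketch (not part of the public API): the usual workflow
# is to choose the ripple/transition-width specs, let kaiserord size the
# filter, and pass the resulting Kaiser beta to firwin. The frequencies below
# are relative to the Nyquist rate (the default nyq=1.0); the cutoff of 0.3
# and the specs are arbitrary example values.
def _kaiserord_design_example():
    ripple_db = 60.0   # bound on passband ripple / stopband attenuation in dB
    width = 0.05       # transition width as a fraction of the Nyquist rate
    numtaps, beta = kaiserord(ripple_db, width)
    taps = firwin(numtaps, cutoff=0.3, window=('kaiser', beta))
    return taps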
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response filter.
The filter will have linear phase; it will be Type I if `numtaps` is odd and
Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise.
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : 1D ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
Examples
--------
Low-pass from 0 to f::
>>> firwin(numtaps, f)
Use a specific window function::
>>> firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
    >>> firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
See also
--------
scipy.signal.firwin2
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width)/nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff is even,
# and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0]*pass_zero, cutoff, [1.0]*pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of a passband.
bands = cutoff.reshape(-1,2)
# Build up the coefficients.
alpha = 0.5 * (numtaps-1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Example
-------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s' % (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps,2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq)-1 and freq[k] == freq[k+1]:
freq[k] = freq[k] - eps
freq[k+1] = freq[k+1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
    # Adjust the phases of the coefficients so that the first `numtaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps-1)/2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
        from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
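# Illustrative sketch (not part of the original module): a quick way to check a
# firwin2 design is to compare its realized frequency response, computed with
# scipy.signal.freqz, against the requested piecewise-linear gain.  The helper
# below is never called here and its breakpoints are arbitrary example values.
def _example_check_firwin2_response():
    from scipy.signal import freqz
    taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
    w, h = freqz(taps, worN=512)
    # Normalized frequency (1.0 == Nyquist) and the achieved magnitude response.
    return w / np.pi, np.abs(h)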
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> bpass = sp.signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = sp.signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
[<matplotlib.lines.Line2D object at 0xf486790>]
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass':1, 'differentiator':2, 'hilbert':3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
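# Illustrative sketch (not part of the original module): a self-contained variant
# of the docstring example above, with the imports spelled out.  The band edges
# below are arbitrary example values for a lowpass design (sampling frequency Hz=1).
def _example_remez_lowpass():
    from scipy.signal import freqz
    # Passband 0-0.1, stopband 0.2-0.5 (normalized frequencies).
    taps = remez(72, [0, 0.1, 0.2, 0.5], [1, 0])
    w, h = freqz(taps)
    return w, np.abs(h)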
| gpl-3.0 |
jkarnows/scikit-learn | sklearn/utils/fixes.py | 29 | 12072 | """Compatibility fixes for older versions of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
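# A quick worked check of the identity used above (illustrative, not part of the
# original module): for x = 2, tanh(1) ~= 0.761594, so (1 + tanh(1)) / 2 ~= 0.880797,
# which matches 1 / (1 + exp(-2)) = 1 / 1.135335 ~= 0.880797.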
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
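# Illustrative note (not part of the original module): either branch returns the
# per-axis extrema of a sparse matrix as dense 1-d arrays, e.g. for
# X = sp.csr_matrix([[1, 0, 2], [0, 3, 0]]), sparse_min_max(X, axis=0) gives
# (array([0, 0, 0]), array([1, 3, 2])).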
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
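# Illustrative note (not part of the original module): whichever branch provided
# `in1d`, it reports element-wise membership of the first array in the second,
# e.g. in1d([0, 1, 2, 5, 0], [0, 2]) -> array([True, False, True, False, True]).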
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
| bsd-3-clause |
jaeilepp/mne-python | examples/realtime/plot_compute_rt_decoder.py | 4 | 3942 | """
=======================
Decoding real-time data
=======================
Supervised machine learning applied to MEG data in sensor space.
Here the classifier is updated every 5 trials and the decoding
accuracy is plotted
"""
# Authors: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.realtime import MockRtClient, RtEpochs
from mne.datasets import sample
print(__doc__)
# Fiff file to simulate the realtime client
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
tr_percent = 60 # Training percentage
min_trials = 10 # minimum trials after which decoding should start
# select gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
# create the mock-client object
rt_client = MockRtClient(raw)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks, decim=1,
reject=dict(grad=4000e-13, eog=150e-6), baseline=None,
isi_max=4.)
# start the acquisition
rt_epochs.start()
# send raw buffers
rt_client.send_data(rt_epochs, picks, tmin=0, tmax=90, buffer_size=1000)
# Decoding in sensor space using a linear SVM
n_times = len(rt_epochs.times)
from sklearn import preprocessing # noqa
from sklearn.svm import SVC # noqa
from sklearn.pipeline import Pipeline # noqa
from sklearn.cross_validation import cross_val_score, ShuffleSplit # noqa
from mne.decoding import Vectorizer, FilterEstimator # noqa
scores_x, scores, std_scores = [], [], []
# don't highpass filter because it's epoched data and the signal length
# is small
filt = FilterEstimator(rt_epochs.info, None, 40)
scaler = preprocessing.StandardScaler()
vectorizer = Vectorizer()
clf = SVC(C=1, kernel='linear')
concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
('scaler', scaler), ('svm', clf)])
data_picks = mne.pick_types(rt_epochs.info, meg='grad', eeg=False, eog=True,
stim=False, exclude=raw.info['bads'])
ax = plt.subplot(111)
ax.set_xlabel('Trials')
ax.set_ylabel('Classification score (% correct)')
ax.set_title('Real-time decoding')
ax.set_xlim([min_trials, 50])
ax.set_ylim([30, 105])
plt.axhline(50, color='k', linestyle='--', label="Chance level")
plt.show(block=False)
for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ev_num + 1))
if ev_num == 0:
X = ev.data[None, data_picks, :]
y = int(ev.comment) # the comment attribute contains the event_id
else:
X = np.concatenate((X, ev.data[None, data_picks, :]), axis=0)
y = np.append(y, int(ev.comment))
if ev_num >= min_trials:
cv = ShuffleSplit(len(y), 5, test_size=0.2, random_state=42)
scores_t = cross_val_score(concat_classifier, X, y, cv=cv,
n_jobs=1) * 100
std_scores.append(scores_t.std())
scores.append(scores_t.mean())
scores_x.append(ev_num)
# Plot accuracy
plt.plot(scores_x[-2:], scores[-2:], '-x', color='b',
label="Classif. score")
ax.hold(True)
ax.plot(scores_x[-1], scores[-1])
hyp_limits = (np.asarray(scores) - np.asarray(std_scores),
np.asarray(scores) + np.asarray(std_scores))
fill = plt.fill_between(scores_x, hyp_limits[0], y2=hyp_limits[1],
color='b', alpha=0.5)
plt.pause(0.01)
plt.draw()
ax.collections.remove(fill) # Remove old fill area
plt.fill_between(scores_x, hyp_limits[0], y2=hyp_limits[1], color='b',
alpha=0.5)
plt.draw() # Final figure
| bsd-3-clause |
MaxPowerWasTaken/MaxPowerWasTaken.github.io | jupyter_notebooks/mnist code scratch.py | 1 | 2227 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 22 00:52:20 2017
@author: max
"""
import math
import random
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_mldata
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import os
os.chdir('/home/max/model_idiot/content/jupyter_notebooks')
# load mnist data
mnist = fetch_mldata('MNIST original', data_home='datasets/')
# Convert sklearn 'datasets bunch' object to Pandas
y = pd.Series(mnist.target).astype('int').astype('category')
X = pd.DataFrame(mnist.data)
# Change column-names in X to reflect that they are pixel values
X.columns = ['pixel_'+str(x) for x in range(X.shape[1])]
# Prepare to plot 9 random images
images_to_plot = 9
random_indices = random.sample(range(70000), images_to_plot)
sample_images = X.loc[random_indices, :]
sample_labels = y.loc[random_indices]
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = .4)
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import PCA
tsne = TSNE()
tsne
# It is highly recommended to use another dimensionality reduction method (e.g. PCA for dense data or
# TruncatedSVD for sparse data) to reduce the number of dimensions to a reasonable amount (e.g. 50)
# if the number of features is very high.
rows=1000
sample_indices = random.sample(range(X_train.shape[0]), rows)
X_train_sample = X_train.iloc[sample_indices,:]
y_train_sample = y_train.iloc[sample_indices]
# https://www.reddit.com/r/MachineLearning/comments/47kf7w/scikitlearn_tsne_implementation/ (suggests lr=200)
pca_preprocessed_tsne = make_pipeline(PCA(n_components=50), TSNE(n_components=2, learning_rate=200, perplexity=50))
embedded_data = pca_preprocessed_tsne.fit_transform(X_train_sample)
plt.figure()
ax = plt.subplot(111)
X = embedded_data
for i in range(X.shape[0]):
print(i)
print(X[i, 0], X[i, 1])
print(str(y_train_sample.iloc[i]))
print(y_train_sample.iloc[i])
plt.text(X[i, 0], X[i, 1], str(y_train_sample.iloc[i]),
color=plt.cm.Set1(y_train_sample.iloc[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.show()
| gpl-3.0 |
pratapvardhan/scikit-learn | sklearn/externals/joblib/testing.py | 45 | 2720 | """
Helper for testing.
"""
import sys
import warnings
import os.path
import re
import subprocess
import threading
from sklearn.externals.joblib._compat import PY3_OR_LATER
def warnings_to_stdout():
""" Redirect all warnings to stdout.
"""
showwarning_orig = warnings.showwarning
def showwarning(msg, cat, fname, lno, file=None, line=0):
        showwarning_orig(msg, cat, os.path.basename(fname), lno, sys.stdout)
warnings.showwarning = showwarning
#warnings.simplefilter('always')
try:
from nose.tools import assert_raises_regex
except ImportError:
# For Python 2.7
try:
from nose.tools import assert_raises_regexp as assert_raises_regex
except ImportError:
# for Python 2.6
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
def check_subprocess_call(cmd, timeout=1, stdout_regex=None):
"""Runs a command in a subprocess with timeout in seconds.
Also checks returncode is zero and stdout if stdout_regex is set.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def kill_process():
proc.kill()
timer = threading.Timer(timeout, kill_process)
try:
timer.start()
stdout, stderr = proc.communicate()
if PY3_OR_LATER:
stdout, stderr = stdout.decode(), stderr.decode()
if proc.returncode != 0:
message = (
'Non-zero return code: {0}.\nStdout:\n{1}\n'
'Stderr:\n{2}').format(
proc.returncode, stdout, stderr)
raise ValueError(message)
if (stdout_regex is not None and
not re.search(stdout_regex, stdout)):
raise ValueError(
"Unexpected output: '{0!r}' does not match:\n{1!r}".format(
stdout_regex, stdout))
finally:
timer.cancel()
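# Illustrative usage sketch (not part of the original module): check that a short
# Python command prints the expected text before the timeout expires.  The command
# and pattern below are arbitrary example values; the helper is never called here.
def _example_check_subprocess_call():
    check_subprocess_call([sys.executable, '-c', 'print("ok")'],
                          timeout=5, stdout_regex='ok')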
| bsd-3-clause |
michaelhush/M-LOOP | setup.py | 1 | 3091 | '''
Setup script for M-LOOP using setuptools. See the documentation of setuptools for further details.
'''
from __future__ import absolute_import, division, print_function
import multiprocessing as mp
import mloop as ml
from setuptools import setup, find_packages
from os import path
def main():
long_description = ''
here = path.abspath(path.dirname(__file__))
description_path = path.join(here, 'DESCRIPTION.rst')
if path.exists(description_path):
with open(description_path, 'rb') as stream:
long_description = stream.read().decode('utf8')
setup(
name = 'M-LOOP',
version = ml.__version__,
packages = find_packages(),
entry_points={
'console_scripts': [
'M-LOOP = mloop.cmd:run_mloop'
],
},
setup_requires=['pytest-runner'],
install_requires = ['pip>=7.0',
'docutils>=0.3',
'numpy>=1.11',
'scipy>=0.17',
'matplotlib>=1.5',
'pytest>=2.9',
'scikit-learn>=0.18',
'tensorflow>=2.0.0'],
tests_require=['pytest','setuptools>=26'],
package_data = {
# If any package contains *.txt or *.rst files, include them:
'': ['*.txt','*.md'],
},
author = 'Michael R Hush',
author_email = '[email protected]',
description = 'M-LOOP: Machine-learning online optimization package. A python package of automated optimization tools - enhanced with machine-learning - for quantum scientific experiments, computer controlled systems or other optimization tasks.',
long_description = long_description,
license = 'MIT',
keywords = 'automated machine learning optimization optimisation science experiment quantum',
url = 'https://github.com/michaelhush/M-LOOP/',
download_url = 'https://github.com/michaelhush/M-LOOP/tarball/3.2.1',
classifiers = ['Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Manufacturing',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Physics']
)
if __name__=='__main__':
mp.freeze_support()
main()
| mit |
lin-credible/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
    C \sum_{i=1}^{n} \mathcal{L}(f(x_i), y_i) + \Omega(w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under a given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1` penalty. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C` with the number of samples.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grows.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
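# Illustrative sketch (not part of the original example): one practical reading of
# the discussion above.  If `C_cv` was selected by cross-validation on `n_cv`
# samples, the l1 case suggests rescaling it in proportion to the number of
# samples before refitting on all `n_full` samples, while the l2 case keeps it
# unchanged.  The function name and arguments are assumptions made for this sketch.
def rescale_C(C_cv, n_cv, n_full, penalty='l1'):
    return C_cv * (float(n_full) / n_cv) if penalty == 'l1' else C_cv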
| bsd-3-clause |
pozar87/apts | apts/observations.py | 1 | 8310 | import logging
from datetime import datetime, timedelta
from string import Template
import matplotlib.dates as mdates
import numpy
import pkg_resources
import svgwrite as svg
from matplotlib import pyplot
from .conditions import Conditions
from .objects.messier import Messier
from .objects.planets import Planets
from .utils import Utils
from .constants import ObjectTableLabels
logger = logging.getLogger(__name__)
class Observation:
NOTIFICATION_TEMPLATE = pkg_resources.resource_filename('apts', 'templates/notification.html.template')
def __init__(self, place, equipment, conditions=Conditions()):
self.place = place
self.equipment = equipment
self.conditions = conditions
self.start, self.stop = self._normalize_dates(
place.sunset_time(), place.sunrise_time())
self.local_messier = Messier(self.place)
self.local_planets = Planets(self.place)
# Compute time limit
max_return_time = [int(value)
for value in self.conditions.max_return.split(":")]
time_limit = self.start.replace(
hour=max_return_time[0], minute=max_return_time[1], second=max_return_time[2])
self.time_limit = time_limit if time_limit > self.start else time_limit + \
timedelta(days=1)
def get_visible_messier(self, **args):
return self.local_messier.get_visible(self.conditions, self.start, self.time_limit, **args)
def get_visible_planets(self, **args):
return self.local_planets.get_visible(self.conditions, self.start, self.time_limit, **args)
def plot_visible_planets_svg(self, **args):
visible_planets = self.get_visible_planets(**args)
dwg = svg.Drawing()
# Set y offset to biggest planet
y = int(visible_planets[['Size']].max() + 12)
# Set x offset to constant value
x = 20
# Set delta to constant value
minimal_delta = 52
last_radius = None
for planet in visible_planets[['Name', 'Size', 'Phase']].values:
name, radius, phase = planet[0], planet[1], str(round(planet[2], 2))
if last_radius is None:
y += radius
x += radius
else:
x += max(radius + last_radius + 10, minimal_delta)
last_radius = radius
dwg.add(svg.shapes.Circle(center=(x, y), r=radius, stroke="black", stroke_width="1", fill="#e4e4e4"))
dwg.add(svg.text.Text(name, insert=(x, y + radius + 15), text_anchor='middle'))
dwg.add(svg.text.Text(phase + '%', insert=(x, y - radius - 4), text_anchor='middle'))
return dwg.tostring()
def plot_visible_planets(self):
try:
from IPython.display import SVG
except:
logger.warning("You can plot images only in Ipython notebook!")
return
return SVG(self.plot_visible_planets_svg())
def _generate_plot_messier(self, **args):
messier = self.get_visible_messier(
)[[ObjectTableLabels.MESSIER,
ObjectTableLabels.TRANSIT,
ObjectTableLabels.ALTITUDE,
ObjectTableLabels.WIDTH]]
plot = messier.plot(
x=ObjectTableLabels.TRANSIT,
y=ObjectTableLabels.ALTITUDE,
marker='o',
# markersize = messier['Width'],
linestyle='none',
xlim=[self.start - timedelta(minutes=15),
self.time_limit + timedelta(minutes=15)],
ylim=(0, 90), **args)
last_position = [0, 0]
offset_index = 0
offsets = [(-25, -12), (5, 5), (-25, 5), (5, -12)]
for obj in messier.values:
distance = (((mdates.date2num(
obj[1]) - last_position[0]) * 100) ** 2 + (obj[2] - last_position[1]) ** 2) ** 0.5
offset_index = offset_index + (1 if distance < 5 else 0)
plot.annotate(obj[0],
(mdates.date2num(obj[1]), obj[2]),
xytext=offsets[offset_index % len(offsets)],
textcoords='offset points')
last_position = [mdates.date2num(obj[1]), obj[2]]
self._mark_observation(plot)
self._mark_good_conditions(
plot, self.conditions.min_object_altitude, 90)
Utils.annotate_plot(plot, 'Altitude [°]')
return plot.get_figure()
def _normalize_dates(self, start, stop):
now = datetime.utcnow().astimezone(self.place.local_timezone)
new_start = start if start < stop else now
new_stop = stop
return (new_start, new_stop)
def plot_weather(self, **args):
if self.place.weather is None:
self.place.get_weather()
self._generate_plot_weather(**args)
def plot_messier(self, **args):
self._generate_plot_messier(**args)
    def _compute_weather_goodness(self):
        # Get critical weather data
        data = self.place.weather.get_critical_data(self.start, self.stop)
        # Get only data before time limit
data = data[data.time <= self.time_limit]
all_hours = len(data)
# Get hours with good conditions
result = data[
(data.cloudCover < self.conditions.max_clouds) &
(data.precipProbability < self.conditions.max_precipitation_probability) &
(data.windSpeed < self.conditions.max_wind) &
(data.temperature > self.conditions.min_temperature) &
(data.temperature < self.conditions.max_temperature)]
good_hours = len(result)
logger.debug("Good hours: {} and all hours: {}".format(good_hours, all_hours))
# Return relative % of good hours
return good_hours / all_hours * 100
def is_weather_good(self):
if self.place.weather is None:
self.place.get_weather()
        return self._compute_weather_goodness() > self.conditions.min_weather_goodness
def to_html(self):
with open(Observation.NOTIFICATION_TEMPLATE) as template_file:
template = Template(template_file.read())
data = {
"title": "APTS",
"start": Utils.format_date(self.start),
"stop": Utils.format_date(self.stop),
"planets_count": len(self.get_visible_planets()),
"messier_count": len(self.get_visible_messier()),
"planets_table": self.get_visible_planets().to_html(),
"messier_table": self.get_visible_messier().to_html(),
"equipment_table": self.equipment.data().to_html(),
"place_name": self.place.name,
"lat": numpy.rad2deg(self.place.lat),
"lon": numpy.rad2deg(self.place.lon)
}
return str(template.substitute(data))
def _mark_observation(self, plot):
# Check if there is a plot
if plot is None:
return
# Add marker for night
plot.axvspan(self.start, self.stop, color='gray', alpha=0.2)
# Add marker for moon
moon_start, moon_stop = self._normalize_dates(
self.place.moonrise_time(), self.place.moonset_time())
plot.axvspan(moon_start, moon_stop, color='yellow', alpha=0.1)
# Add marker for time limit
plot.axvline(self.start, color='orange', linestyle='--')
plot.axvline(self.time_limit, color='orange', linestyle='--')
def _mark_good_conditions(self, plot, minimal, maximal):
# Check if there is a plot
if plot is None:
return
plot.axhspan(minimal, maximal, color='green', alpha=0.1)
def _generate_plot_weather(self, **args):
fig, axes = pyplot.subplots(nrows=4, ncols=2, figsize=(13, 18))
# Clouds
plt = self.place.weather.plot_clouds(ax=axes[0, 0])
self._mark_observation(plt)
self._mark_good_conditions(plt, 0, self.conditions.max_clouds)
# Cloud summary
plt = self.place.weather.plot_clouds_summary(ax=axes[0, 1])
# Precipation
plt = self.place.weather.plot_precipitation(ax=axes[1, 0])
self._mark_observation(plt)
self._mark_good_conditions(
plt, 0, self.conditions.max_precipitation_probability)
# precipitation type summary
plt = self.place.weather.plot_precipitation_type_summary(ax=axes[1, 1])
# Temperature
plt = self.place.weather.plot_temperature(ax=axes[2, 0])
self._mark_observation(plt)
self._mark_good_conditions(
plt, self.conditions.min_temperature, self.conditions.max_temperature)
# Wind
plt = self.place.weather.plot_wind(ax=axes[2, 1])
self._mark_observation(plt)
self._mark_good_conditions(plt, 0, self.conditions.max_wind)
# Pressure
plt = self.place.weather.plot_pressure_and_ozone(ax=axes[3, 0])
self._mark_observation(plt)
# Visibility
plt = self.place.weather.plot_visibility(ax=axes[3, 1])
self._mark_observation(plt)
fig.tight_layout()
return fig
| apache-2.0 |
amanzotti/paris_scraping | fetch.py | 1 | 17484 | import requests
from bs4 import BeautifulSoup
import sys
import numpy as np
# then add this function lower down
from memory_profiler import profile
import pandas as pd
from sortedcontainers import SortedDict
import datetime
import bs4
# TODO
# http://www.meilleursagents.com/immobilier/recherche/?item_types%5B%5D=369681781&item_types%5B%5D=369681782&transaction_type=369681778&place_ids%5B%5D=32696
# http://www.seloger.com/list.htm?idtt=1&idtypebien=1&cp=75&tri=initial
def parse_source(html, encoding='utf-8'):
    parsed = BeautifulSoup(html, 'html.parser', from_encoding=encoding)
return parsed
def fetch_meilleursagents():
base = 'http://www.meilleursagents.com/immobilier/recherche/?redirect_url=&view_mode=list&sort_mode=ma_contract%7Cdesc&transaction_type=369681778&buyer_search_id=&user_email=&place_ids%5B%5D=138724240&place_title=&item_types%5B%5D=369681781&item_types%5B%5D=369681782&item_area_min=&item_area_max=&budget_min=&budget_max='
resp = requests.get(base, timeout=150)
resp.raise_for_status() # <- no-op if status==200
parsed = parse_source(resp.content, resp.encoding)
def fetch_solger():
base = 'http://www.seloger.com/list.htm?idtt=1&idtypebien=1&cp=75&tri=initial'
resp = requests.get(base, timeout=150)
resp.raise_for_status() # <- no-op if status==200
parsed = parse_source(resp.content, resp.encoding)
def fetch_pap():
base = 'http://www.pap.fr/annonce/locations-appartement-paris-14e-g37781'
try:
resp = requests.get(base, timeout=150)
resp.raise_for_status() # <- no-op if status==200
resp_comb = resp.content
except:
pass
listing = []
string = {}
string[15] = '15e-g37782'
string[13] = '13e-g37780'
string[14] = '14e-g37781'
string[2] = '2e-g37769'
string[3] = '3e-g37770'
string[4] = '4e-g37771'
string[5] = '5e-g37772'
string[6] = '6e-g37773'
string[7] = '7e-g37774'
string[8] = '8e-g37775'
string[9] = '9e-g37776'
string[10] = '10e-g37777'
string[11] = '11e-g37778'
string[12] = '12e-g37779'
string[16] = '16e-g37783'
string[17] = '17e-g37784'
string[18] = '18e-g37785'
string[19] = '19e-g37786'
string[20] = '20e-g37787'
for i in np.arange(2, 20):
print(i)
base2 = 'http://www.pap.fr/annonce/locations-appartement-paris-{}'.format(string[i])
try:
resp_ = requests.get(base2, timeout=200)
except:
break
# resp_.raise_for_status() # <- no-op if status==200
if resp_.status_code == 404:
break
parsed = parse_source(resp_.content, resp_.encoding)
listing.append(extract_listings_pap(parsed))
# print(listing)
# resp_comb += resp_.content + resp_comb
for j in np.arange(1, 7):
print(j)
base2 = 'http://www.pap.fr/annonce/locations-appartement-paris-{}-{}'.format(
string[i], j)
try:
resp_ = requests.get(base2, timeout=200)
except:
break
# resp_.raise_for_status() # <- no-op if status==200
if resp_.status_code == 404:
break
# resp_comb += resp_.content + resp_comb
parsed = parse_source(resp_.content, resp_.encoding)
listing.append(extract_listings_pap(parsed))
# return resp_comb, resp.encoding
return listing
def fetch_fusac():
base = 'http://ads.fusac.fr/ad-category/housing/'
listing = []
try:
resp = requests.get(base, timeout=100)
resp.raise_for_status() # <- no-op if status==200
resp_comb = resp.content
parsed = parse_source(resp.content, resp.encoding)
listing.append(extract_listings_fusac(parsed))
except:
pass
for i in np.arange(2, 6):
base2 = 'http://ads.fusac.fr/ad-category/housing/housing-offers/page/{}/'.format(i)
try:
resp_ = requests.get(base2, timeout=100)
except:
continue
# resp_.raise_for_status() # <- no-op if status==200
if resp_.status_code == 404:
break
# resp_comb += resp_.content + resp_comb
parsed = parse_source(resp_.content, resp_.encoding)
listing.append(extract_listings_fusac(parsed))
# return resp_comb, resp.encoding
return listing
# handle response 200
def fetch_search_results(
query=None, minAsk=600, maxAsk=1450, bedrooms=None, bundleDuplicates=1,
pets_cat=1
):
listing = []
search_params = {
key: val for key, val in locals().items() if val is not None
}
if not search_params:
raise ValueError("No valid keywords")
base = 'https://paris.craigslist.fr/search/apa'
try:
resp_ = requests.get(base, params=search_params, timeout=100)
resp_.raise_for_status() # <- no-op if status==200
parsed = parse_source(resp_.content, resp_.encoding)
listing.append(extract_listings(parsed))
except:
return None
return listing
# def extract_listings(parsed):
# listings = parsed.find_all("li", {"class": "result-row"})
# return listings
def extract_listings_fusac(parsed):
# location_attrs = {'data-latitude': True, 'data-longitude': True}
listings = parsed.find_all(
'div', {'class': "prod-cnt prod-box shadow Just-listed"})
extracted = []
for j, listing in enumerate(listings[0:]):
# hood = listing.find('span', {'class': 'result-hood'})
# # print(hood)
# # location = {key: listing.attrs.get(key, '') for key in location_attrs}
# link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this
# if link is not None:
# descr = link.string.strip()
# link_href = link.attrs['href']
price = listing.find('p', {'class': 'post-price'})
if price is not None:
price = float(price.string.split()[0].replace(',', ''))
# link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href']
# resp = requests.get(link, timeout=10)
# resp.raise_for_status() # <- no-op if status==200
        desc = listing.find('p', {'class': 'post-desc'})
        if desc is not None:
desc = desc.string
url = listing.find('div', {'class': "post-left"}).find('div', {'class': "grido"}).find('a', href=True).get('href')
resp = requests.get(url, timeout=100)
resp.raise_for_status() # <- no-op if status==200
parse = parse_source(resp.content, resp.encoding)
try:
ars = int(parse.find('div', {'class': "single-main"}).find('li', {'class': "acf-details-item"}, id="acf-cp_zipcode").find('span', {'class': 'acf-details-val'}).string[-2:])
except:
ars = None
this_listing = {
# 'location': location,
# 'link': link_href, # add this too
'price': price,
'desc': desc,
# ====
# 'description': descr,
'pieces': None,
'meters': None,
'chambre': None,
'ars': ars,
'link': None
}
extracted.append(SortedDict(this_listing))
return extracted
def extract_listings_pap(parsed):
# location_attrs = {'data-latitude': True, 'data-longitude': True}
listings = parsed.find_all(
'div', {'class': "box search-results-item"})
extracted = []
for listing in listings[0:]:
# hood = listing.find('span', {'class': 'result-hood'})
# # print(hood)
# # location = {key: listing.attrs.get(key, '') for key in location_attrs}
# link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this
# if link is not None:
# descr = link.string.strip()
# link_href = link.attrs['href']
price = listing.find('span', {'class': 'price'})
if price is not None:
price = float(price.string.split()[0].replace('.', ''))
ref = listing.find('div', {'class': 'float-right'}).find('a', href=True)['href']
base = 'http://www.pap.fr/' + ref
try:
resp = requests.get(base, timeout=100)
except:
break
link = base
resp.raise_for_status() # <- no-op if status==200
resp_comb = parse_source(resp.content, resp.encoding)
descr = resp_comb.find_all('p', {'class': 'item-description'})[0]
desc = ' '
for line in descr.contents:
if isinstance(line, bs4.element.NavigableString):
desc += ' ' + line.string.strip('<\br>').strip('\n')
# return resp_comb.find_all(
# 'ul', {'class': 'item-summary'})
try:
ars = int(resp_comb.find(
'div', {'class': 'item-geoloc'}).find('h2').string.split('e')[0][-2:])
except:
break
# return resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li')
# print(resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'))
temp_dict_ = {}
for lines in resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'):
tag = lines.contents[0].split()[0]
value = int(lines.find_all('strong')[0].string.split()[0])
temp_dict_[tag] = value
try:
pieces = temp_dict_[u'Pi\xe8ces']
except:
pieces = None
try:
chambre = temp_dict_[u'Chambre']
except:
chambre = None
try:
square_meters = temp_dict_['Surface']
except:
square_meters = None
# meters = resp_comb.find_all('ul', {'class': 'item-summary'}
# )[0].find_all('strong').string.split()[0]
# link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href']
# resp = requests.get(link, timeout=10)
# resp.raise_for_status() # <- no-op if status==200
# desc = listing.find('p', {'class': 'post-desc'}
# )
# if price is not None:
# desc = desc.string
# housing = listing.find('span', {'class': 'housing'})
# if housing is not None:
# beds = housing.decode_contents().split('br')[0][-1]
# rm = housing.decode_contents().split('m<sup>2</sup>')[0]
# sqm = [int(s) for s in rm.split() if s.isdigit()]
# if len(sqm) == 0:
# sqm = None
# else:
# sqm = int(sqm[0])
this_listing = {
# 'location': location,
# 'link': link_href, # add this too
# 'description': descr, # and this
'price': price,
'desc': desc,
'pieces': pieces,
'meters': square_meters,
'chambre': chambre,
'ars': ars,
# 'meters': sqm,
# 'beds': beds
'link': link
}
extracted.append(SortedDict(this_listing))
return extracted
def extract_listings_solger(parsed):
# location_attrs = {'data-latitude': True, 'data-longitude': True}
listings = parsed.find_all(
'article', {'class': "listing life_annuity gold"})
extracted = []
return listings
# for listing in listings[0:]:
# # hood = listing.find('span', {'class': 'result-hood'})
# # # print(hood)
# # # location = {key: listing.attrs.get(key, '') for key in location_attrs}
# # link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this
# # if link is not None:
# # descr = link.string.strip()
# # link_href = link.attrs['href']
# price = listing.find('span', {'class': 'price'})
# if price is not None:
# price = float(price.string.split()[0].replace('.', ''))
# ref = listing.find('div', {'class': 'float-right'}).find('a', href=True)['href']
# base = 'http://www.pap.fr/' + ref
# resp = requests.get(base, timeout=20)
# link = base
# resp.raise_for_status() # <- no-op if status==200
# resp_comb = parse_source(resp.content, resp.encoding)
# descr = resp_comb.find_all('p', {'class': 'item-description'})[0]
# desc = ' '
# for line in descr.contents:
# if isinstance(line, bs4.element.NavigableString):
# desc += ' ' + line.string.strip('<\br>').strip('\n')
# # return resp_comb.find_all(
# # 'ul', {'class': 'item-summary'})
# try:
# ars = int(resp_comb.find(
# 'div', {'class': 'item-geoloc'}).find('h2').string.split('e')[0][-2:])
# except:
# break
# # return resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li')
# # print(resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'))
# temp_dict_ = {}
# for lines in resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'):
# tag = lines.contents[0].split()[0]
# value = int(lines.find_all('strong')[0].string.split()[0])
# temp_dict_[tag] = value
# try:
# pieces = temp_dict_[u'Pi\xe8ces']
# except:
# pieces = None
# try:
# chambre = temp_dict_[u'Chambre']
# except:
# chambre = None
# try:
# square_meters = temp_dict_['Surface']
# except:
# square_meters = None
# # meters = resp_comb.find_all('ul', {'class': 'item-summary'}
# # )[0].find_all('strong').string.split()[0]
# # link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href']
# # resp = requests.get(link, timeout=10)
# # resp.raise_for_status() # <- no-op if status==200
# # desc = listing.find('p', {'class': 'post-desc'}
# # )
# # if price is not None:
# # desc = desc.string
# # housing = listing.find('span', {'class': 'housing'})
# # if housing is not None:
# # beds = housing.decode_contents().split('br')[0][-1]
# # rm = housing.decode_contents().split('m<sup>2</sup>')[0]
# # sqm = [int(s) for s in rm.split() if s.isdigit()]
# # if len(sqm) == 0:
# # sqm = None
# # else:
# # sqm = int(sqm[0])
# this_listing = {
# # 'location': location,
# # 'link': link_href, # add this too
# # 'description': descr, # and this
# 'price': price,
# 'desc': desc,
# 'pieces': pieces,
# 'meters': square_meters,
# 'chambre': chambre,
# 'ars': ars,
# # 'meters': sqm,
# # 'beds': beds
# 'link': link
# }
# extracted.append(SortedDict(this_listing))
# return extracted
# parsed.find_all(
# ...: 'div', {'class': "box search-results-item"})[0].find('div',{'class':'float-right'}).find('a',href=True)['href']
def extract_listings(parsed):
# location_attrs = {'data-latitude': True, 'data-longitude': True}
listings = parsed.find_all("li", {"class": "result-row"})
extracted = []
for listing in listings[2:]:
hood = listing.find('span', {'class': 'result-hood'})
# print(hood)
# location = {key: listing.attrs.get(key, '') for key in location_attrs}
        descr = link_href = None
        link = listing.find('a', {'class': 'result-title hdrlnk'})  # add this
if link is not None:
descr = link.string.strip()
link_href = link.attrs['href']
price = listing.find('span', {'class': 'result-price'})
if price is not None:
if price.string is not None:
price = int(price.string[1:])
housing = listing.find('span', {'class': 'housing'})
if housing is not None:
beds = housing.decode_contents().split('br')[0][-1]
rm = housing.decode_contents().split('m<sup>2</sup>')[0]
sqm = [int(s) for s in rm.split() if s.isdigit()]
if len(sqm) == 0:
sqm = None
else:
sqm = int(sqm[0])
this_listing = {
# 'location': location,
'link': link_href, # add this too
'desc': descr, # and this
'price': price,
'meters': sqm,
'chambre': beds,
'pieces': None,
'ars': None
}
extracted.append(SortedDict(this_listing))
return extracted
if __name__ == '__main__':
# df = pd.read_pickle('./ipapartment_paris.pk')
df = pd.DataFrame
resu = []
print('loading fusac')
resu.append(fetch_fusac())
print('loading pap')
resu.append(fetch_pap())
print('loading craig')
resu.append(fetch_search_results())
flat = [item for lis in resu for lis1 in lis for item in lis1]
df_new = pd.DataFrame(flat)
print('saving..')
# df_new.to_pickle('./apartment_paris_{}.pk'.format(str(datetime.datetime.now())))
# df = pd.concat([df, df_new])
df_new.to_pickle('./apartment_paris.pk')
print('Done.')
| mit |
gitcoinco/web | app/dashboard/embed.py | 1 | 10901 | from django.http import HttpResponse, JsonResponse
from django.template import loader
from django.utils import timezone
from django.utils.cache import patch_response_headers
import requests
from dashboard.models import Bounty
from git.utils import get_user, org_name
from PIL import Image, ImageDraw, ImageFont
from ratelimit.decorators import ratelimit
AVATAR_BASE = 'assets/other/avatars/'
def wrap_text(text, w=30):
new_text = ""
new_sentence = ""
for word in text.split(" "):
delim = " " if new_sentence != "" else ""
new_sentence = new_sentence + delim + word
if len(new_sentence) > w:
new_text += "\n" + new_sentence
new_sentence = ""
new_text += "\n" + new_sentence
return new_text
def summarize_bounties(bounties):
val_usdt = sum(bounties.values_list('_val_usd_db', flat=True))
if val_usdt < 1:
return False, ""
currency_to_value = {bounty.token_name: 0.00 for bounty in bounties}
for bounty in bounties:
currency_to_value[bounty.token_name] += float(bounty.value_true)
other_values = ", ".join([
f"{round(value, 2)} {token_name}"
for token_name, value in currency_to_value.items()
])
is_plural = 's' if bounties.count() > 1 else ''
return True, f"Total: {bounties.count()} issue{is_plural}, {val_usdt} USD, {other_values}"
@ratelimit(key='ip', rate='50/m', method=ratelimit.UNSAFE, block=True)
def stat(request, key):
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
from marketing.models import Stat
limit = 10
weekly_stats = Stat.objects.filter(key=key).order_by('created_on')
# weekly stats only
weekly_stats = weekly_stats.filter(
created_on__hour=1,
created_on__week_day=1
).filter(
created_on__gt=(timezone.now() - timezone.timedelta(weeks=7))
)
daily_stats = Stat.objects.filter(key=key) \
.filter(
created_on__gt=(timezone.now() - timezone.timedelta(days=7))
).order_by('created_on')
daily_stats = daily_stats.filter(created_on__hour=1) # daily stats only
stats = weekly_stats if weekly_stats.count() < limit else daily_stats
fig = Figure(figsize=(1.6, 1.5), dpi=80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
x = []
y = []
for stat in stats:
x.append(stat.created_on)
y.append(stat.val)
x = x[-1 * limit:]
y = y[-1 * limit:]
ax.plot_date(x, y, '-')
ax.set_axis_off()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
if stats.count() > 1:
ax.set_title("Usage over time", y=0.9)
else:
ax.set_title("(Not enough data)", y=0.3)
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
@ratelimit(key='ip', rate='50/m', method=ratelimit.UNSAFE, block=True)
def embed(request):
# default response
could_not_find = Image.new('RGBA', (1, 1), (0, 0, 0, 0))
err_response = HttpResponse(content_type="image/jpeg")
could_not_find.save(err_response, "JPEG")
# Get maxAge GET param if provided, else default on the small side
max_age = int(request.GET.get('maxAge', 3600))
# params
repo_url = request.GET.get('repo', False)
if not repo_url or 'github.com' not in repo_url:
return err_response
try:
badge = request.GET.get('badge', False)
if badge:
open_bounties = Bounty.objects.current() \
.filter(
github_url__startswith=repo_url,
network='mainnet',
idx_status__in=['open']
)
tmpl = loader.get_template('svg_badge.txt')
response = HttpResponse(
tmpl.render({'bounties_count': open_bounties.count()}),
content_type='image/svg+xml',
)
patch_response_headers(response, cache_timeout=max_age)
return response
# get avatar of repo
_org_name = org_name(repo_url)
avatar = None
filename = f"{_org_name}.png"
filepath = 'assets/other/avatars/' + filename
try:
avatar = Image.open(filepath, 'r').convert("RGBA")
except IOError:
remote_user = get_user(_org_name)
if not remote_user.get('avatar_url', False):
return JsonResponse({'msg': 'invalid user'}, status=422)
remote_avatar_url = remote_user['avatar_url']
r = requests.get(remote_avatar_url, stream=True)
chunk_size = 20000
with open(filepath, 'wb') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk)
avatar = Image.open(filepath, 'r').convert("RGBA")
# make transparent
datas = avatar.getdata()
new_data = []
for item in datas:
if item[0] == 255 and item[1] == 255 and item[2] == 255:
new_data.append((255, 255, 255, 0))
else:
new_data.append(item)
avatar.putdata(new_data)
avatar.save(filepath, "PNG")
# get issues
length = request.GET.get('len', 10)
super_bounties = Bounty.objects.current() \
.filter(
github_url__startswith=repo_url,
network='mainnet',
idx_status__in=['open', 'started', 'submitted']
).order_by('-_val_usd_db')
bounties = super_bounties[:length]
# config
bounty_height = 200
bounty_width = 572
font = 'assets/v2/fonts/futura/FuturaStd-Medium.otf'
width = 1776
height = 576
# setup
img = Image.new("RGBA", (width, height), (255, 255, 255))
draw = ImageDraw.Draw(img)
black = (0, 0, 0)
gray = (102, 102, 102)
h1 = ImageFont.truetype(font, 36, encoding="unic")
h2_thin = ImageFont.truetype(font, 36, encoding="unic")
p = ImageFont.truetype(font, 24, encoding="unic")
# background
background_image = 'assets/v2/images/embed-widget/background.png'
back = Image.open(background_image, 'r').convert("RGBA")
offset = 0, 0
img.paste(back, offset)
# repo logo
icon_size = (184, 184)
avatar.thumbnail(icon_size, Image.ANTIALIAS)
offset = 195, 148
img.paste(avatar, offset, avatar)
img_org_name = ImageDraw.Draw(img)
img_org_name_size = img_org_name.textsize(_org_name, h1)
img_org_name.multiline_text(
align="left",
xy=(287 - img_org_name_size[0] / 2, 360),
text=_org_name,
fill=black,
font=h1,
)
draw.multiline_text(
align="left",
xy=(110, 410),
text="supports funded issues",
fill=black,
font=h1,
)
# put bounty list in there
i = 0
for bounty in bounties[:4]:
i += 1
# execute
line_size = 2
# Limit text to 28 chars
text = f"{bounty.title_or_desc}"
text = (text[:28] + '...') if len(text) > 28 else text
x = 620 + (int((i-1)/line_size) * (bounty_width))
y = 230 + (abs(i % line_size-1) * bounty_height)
draw.multiline_text(align="left", xy=(x, y), text=text, fill=black, font=h2_thin)
unit = 'day'
num = int(round((bounty.expires_date - timezone.now()).days, 0))
if num == 0:
unit = 'hour'
num = int(round((bounty.expires_date - timezone.now()).seconds / 3600 / 24, 0))
unit = unit + ("s" if num != 1 else "")
draw.multiline_text(
align="left",
xy=(x, y - 40),
text=f"Expires in {num} {unit}:",
fill=gray,
font=p,
)
bounty_eth_background = Image.new("RGBA", (200, 56), (231, 240, 250))
bounty_usd_background = Image.new("RGBA", (200, 56), (214, 251, 235))
img.paste(bounty_eth_background, (x, y + 50))
img.paste(bounty_usd_background, (x + 210, y + 50))
tmp = ImageDraw.Draw(img)
bounty_value_size = tmp.textsize(f"{round(bounty.value_true, 2)} {bounty.token_name}", p)
draw.multiline_text(
align="left",
xy=(x + 100 - bounty_value_size[0]/2, y + 67),
text=f"{round(bounty.value_true, 2)} {bounty.token_name}",
fill=(44, 35, 169),
font=p,
)
bounty_value_size = tmp.textsize(f"{round(bounty.value_in_usdt_now, 2)} USD", p)
draw.multiline_text(
align="left",
xy=(x + 310 - bounty_value_size[0]/2, y + 67),
text=f"{round(bounty.value_in_usdt_now, 2)} USD",
fill=(45, 168, 116),
font=p,
)
# blank slate
if bounties.count() == 0:
draw.multiline_text(
align="left",
xy=(760, 320),
text="No active issues. Post a funded issue at: https://gitcoin.co",
fill=gray,
font=h1,
)
if bounties.count() != 0:
text = 'Browse issues at: https://gitcoin.co/explorer'
draw.multiline_text(
align="left",
xy=(64, height - 70),
text=text,
fill=gray,
font=p,
)
draw.multiline_text(
align="left",
xy=(624, 120),
text="Recently funded issues:",
fill=(62, 36, 251),
font=p,
)
_, value = summarize_bounties(super_bounties)
value_size = tmp.textsize(value, p)
draw.multiline_text(
align="left",
xy=(1725 - value_size[0], 120),
text=value,
fill=gray,
font=p,
)
line_table_header = Image.new("RGBA", (1100, 6), (62, 36, 251))
img.paste(line_table_header, (624, 155))
# Resize back to output size for better anti-alias
img = img.resize((888, 288), Image.LANCZOS)
# Return image with right content-type
response = HttpResponse(content_type="image/png")
img.save(response, "PNG")
patch_response_headers(response, cache_timeout=max_age)
return response
except IOError as e:
print(e)
return err_response
| agpl-3.0 |
zaxtax/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 5 | 13165 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
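# With n_topics = 3 the helper returns a sparse 9 x 9 block-diagonal count
# matrix: documents 0-2 contain only words 0-2, documents 3-5 only words 3-5,
# and documents 6-8 only words 6-8, which is what the "top 3 words per
# component" assertions below rely on.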
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative input values.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
JSeam2/IsoGraph | genetic/genetic_algo.py | 1 | 10666 | """
Implementation referenced from
https://github.com/handcraftsman/GeneticAlgorithmsWithPython/blob/master/ch02/genetic.py
"""
import random
from qutip import *
import numpy as np
import pandas as pd
from functools import reduce
import datetime
import time
import pickle
import copy
# QUTIP NOTES
# hadamard = "SNOT"
# CZ = "CSIGN"
# RZ = "RZ"
def make_circuit(theta_val, save_image = False):
"""
    Build a quantum circuit from the given theta values:
    [layer 1], [layer 2], and so on.
    The length of each layer should equal the number of qubits of the
    input state. The theta array acts as our genome.
    theta_val: 2D numpy array of rotation angles
    save_image: if True, save a PNG diagram of the circuit
    returns the composed circuit unitary (the product of the gate propagators)
"""
qc = QubitCircuit(N = len(theta_val[0]))
for i in range(len(theta_val)):
# ADD H gates
qc.add_1q_gate("SNOT", start = 0, end = qc.N)
# add RZ theta gates
for k in range(len(theta_val[0])):
qc.add_1q_gate("RZ", start = k, end = k + 1,
arg_value = theta_val[i][k],
arg_label = theta_val[i][k])
for k in range(len(theta_val[0]) - 1):
qc.add_gate("CSIGN",
targets = [k],
controls = [k+1])
# add a hadamard at the end
qc.add_1q_gate("SNOT", start = 0, end = qc.N)
# produce image
if save_image:
qc.png
return reduce(lambda x, y: x * y, qc.propagators())
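# Usage sketch (illustrative, matching generate_initial_population below): for
# N = 2 graph nodes the circuit acts on N*2 + 1 = 5 wires, so with depth 3
#
#     theta_val = np.random.uniform(-np.pi, np.pi, (3, 5))
#     U = make_circuit(theta_val)   # qutip Qobj, the 2**5 x 2**5 circuit unitary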
def generate_initial_population(N, data, population_size = 20, depth = 5):
"""
population size is the number of individuals in the population
N refers to the number of nodes
    depth refers to the number of layers
population_size: int
N: int
"""
genes = []
while len(genes) < population_size:
        # the extra +1 wire is the |0> ancilla qubit we use for measurement
genes.append(np.random.uniform(-np.pi, np.pi, [depth, N*2 + 1]))
fitness, acc = get_fitness(genes, data)
return PopulationPool(genes, fitness, acc)
def generate_children(parent, data, take_best,
population_size = 20,
mutation_rate = 0.05):
"""
    produce children by crossover and mutation
    parent: PopulationPool instance holding the current generation
    take_best: int, number of top-ranked parents used for breeding
    mutation_rate: float, probability that a child is produced by crossover
        and random mutation of its angles rather than copied from parent A
"""
child_genes = []
best_parent_genes = parent.genes[:take_best]
while len(child_genes) < population_size:
# randomly pick a parent
parentA_gene = random.choice(best_parent_genes)
# randomly pick another parent
parentB_gene = random.choice(best_parent_genes)
# crossover the gene at a random point
rand_point = random.randint(0, parentA_gene.shape[0])
if random.random() <= mutation_rate:
            # Crossover: first rand_point layers from parent A, the rest from
            # parent B (both parents share the same depth here)
            if parentB_gene.shape[0] >= rand_point:
                child_gene = np.vstack((parentA_gene[0:rand_point],
                                        parentB_gene[rand_point:parentB_gene.shape[0]]))
            else:
                child_gene = parentA_gene
# randomly change values in the array
mask = np.random.randint(0,2,size=child_gene.shape).astype(np.bool)
r = np.random.uniform(-np.pi, np.pi, size = child_gene.shape)
child_gene[mask] = r[mask]
else:
child_gene = parentA_gene
child_genes.append(child_gene)
fitness, acc = get_fitness(child_genes, data)
return PopulationPool(child_genes, fitness, acc)
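# Illustrative note (assumption about intent, not in the original comments):
# with depth 5 and rand_point = 2, the crossover above stacks the first two
# layers of parent A on top of layers 2..4 of parent B, so the child keeps the
# (depth, N*2 + 1) shape; the boolean mask then resamples roughly half of the
# child's angles uniformly in [-pi, pi].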
def evaluate(input_str, circuit):
"""
Evaluate input sequence of bits
Include an additional ancilla qubit in input
for measurement
"""
pass
def get_fitness(genes, data):
"""
    Evaluate each gene (circuit) against the graph-pair dataset
    genes: list of 2D np arrays of circuit angles
    data: pandas DataFrame loaded from the pkl file
    returns a list of fitness values (average loss) and a list of accuracies
"""
# total number of samples
num_sample = data.shape[0]
    # select the upper triangle of each adjacency matrix (diagonal excluded)
size = data["G1"][0].shape[0]
upper = np.triu_indices(size, 1)
    # projector onto |0> for the first qubit, identity on the remaining 2*size qubits
projector = basis(2,0) * basis(2,0).dag()
for i in range(size * 2):
projector = tensor(projector, identity(2))
fitness_list = []
acc_list = []
for gene in genes:
loss = 0
correct = 0
# make circuit using the genes
circuit = make_circuit(gene, False)
for index, row in data.iterrows():
#if index % 2500 == 0:
# print("running {}".format(index))
# add a |0> to the last qubit as we will use
# it for measurements
combined = row["G1"][upper].tolist()[0] + \
row["G2"][upper].tolist()[0]
combined.append("0")
int_comb = [int(i) for i in combined]
inputval = bra(int_comb)
result = inputval * circuit
density = result.dag() * result
            # We use the logistic regression (cross-entropy) loss function
            # since we are dealing with a binary classification problem.
            # `expectation` here is the likelihood of measuring 0
expectation = expect(projector, density)
actual = row["is_iso"]
loss += -1 * actual * np.log(1 - expectation) \
- (1 - actual) * np.log(expectation)
if expectation <= 0.50:
# this is 1
prediction = 1
else:
prediction = 0
if prediction == actual:
correct += 1
ave_loss = loss/num_sample
fitness_list.append(ave_loss)
accuracy = correct/num_sample
acc_list.append(accuracy)
return fitness_list, acc_list
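# For reference, the per-sample loss accumulated in get_fitness is the binary
# cross-entropy with `expectation` read as P(measuring 0) on the projected
# qubit:
#     loss_i = -y_i * log(1 - p0) - (1 - y_i) * log(p0)
# so an isomorphic pair (y_i = 1) is penalised whenever the circuit leaves the
# measured qubit close to |0>.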
def get_best(N, data, num_epoch = 10,
population_size = 20,
take_best = 5,
depth = 5,
mutation_rate = 0.05):
"""
N refers to the number of nodes
Population size refers to the number of individuals in the population
Take_best refers to the number of top individuals we take
depth refers to how deep the quantum circuit should go
mutation_rate refers to the probability of children mutating
"""
assert take_best >= 2
assert population_size >= 2
assert take_best <= population_size
def display(pool):
print("Time: {} \t Best Score: {} \t Best Acc: {}".format(datetime.datetime.now(),
pool.fitness[0],
pool.accuracy[0]))
parent = generate_initial_population(N, data, population_size, depth)
parent.sort()
print("Seed Population")
display(parent)
# take best
for i in range(num_epoch):
child = generate_children(parent, data, take_best, population_size,
mutation_rate)
child.sort()
print()
print("Child")
print("Epoch {}".format(i))
display(child)
        # if the parent's best fitness (loss) is greater than the child's,
        # let the child become the parent for the next generation
if parent.fitness[0] > child.fitness[0]:
parent = copy.deepcopy(child)
print("Parent is now the child, New Parent:")
display(parent)
else:
print("Parent retained, Current Parent:")
display(parent)
return parent.genes
class PopulationPool:
def __init__(self, genes, fitness, accuracy):
"""
genes : list of genes
fitness : list of fitness
accuracy : list of accuracy
"""
self.genes = genes
self.fitness = fitness
self.accuracy = accuracy
def sort(self):
"""
        sort genes and accuracies in place by fitness in increasing order
"""
self.genes = [x for _,x in sorted(zip(self.fitness, self.genes))]
self.accuracy = [x for _,x in sorted(zip(self.fitness, self.accuracy))]
if __name__ == "__main__":
print("Start Program")
df = pd.read_pickle("3_node_10000.pkl")
print("Depth = 10")
out_genes = get_best(N=3,
data = df,
num_epoch = 50,
population_size = 20,
take_best = 5,
depth = 10,
mutation_rate = 0.05)
with open("save1.pkl", "wb") as f:
pickle.dump(out_genes,f)
print("==========================")
print("Depth = 15")
out_genes = get_best(N=3,
data = df,
num_epoch = 50,
population_size = 20,
take_best = 5,
depth = 15,
mutation_rate = 0.05)
with open("save2.pkl", "wb") as f:
pickle.dump(out_genes,f)
print("==========================")
print("Depth = 20")
out_genes = get_best(N=3,
data = df,
num_epoch = 50,
population_size = 20,
take_best = 5,
depth = 20,
mutation_rate = 0.05)
with open("save3.pkl", "wb") as f:
pickle.dump(out_genes,f)
print("==========================")
print("Depth = 25")
out_genes = get_best(N=3,
data = df,
num_epoch = 50,
population_size = 20,
take_best = 5,
depth = 25,
mutation_rate = 0.05)
with open("save4.pkl", "wb") as f:
pickle.dump(out_genes,f)
print("==========================")
print("Depth = 30")
out_genes = get_best(N=3,
data = df,
num_epoch = 50,
population_size = 20,
take_best = 5,
depth = 30,
mutation_rate = 0.05)
with open("save5.pkl", "wb") as f:
pickle.dump(out_genes,f)
# to open
#with open("save.pkl", "rb") as f:
# save_genes = pickle.load(f)
# total number of samples
#num_sample = df.shape[0]
## select upper diagonal ignoring zeros in the middle
#size = df["G1"][0].shape[0]
#upper = np.triu_indices(size, 1)
## create projector we project to 0 standard basis
#projector = basis(2,0) * basis(2,0).dag()
#for i in range((size * 2) - 1):
# projector = tensor(projector, identity(2))
#fitness_list = []
#acc_list = []
#parent = generate_initial_population(3, df, 2, 3)
#for gene in parent:
# loss = 0
# correct = 0
# # make circuit using the genes
# circuit = make_circuit(gene, True)
# break
| mit |
alexeyum/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 55 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mask the unlabeled points with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
vibhorag/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
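# Note: the loop above is the power-iteration view mentioned in the docstring.
# It alternates x_weights ~ X' y_score and y_weights ~ Y' x_score (mode A)
# until the change in x_weights falls below ``tol``, converging to the leading
# left/right singular vectors of X'Y.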
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
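# Minimal sketch of the helper above (illustrative only, not part of the
# public API): with X = np.array([[0., 2.], [2., 0.]]) and
# Y = np.array([[1.], [3.]]), _center_scale_xy(X, Y) removes the column means
# (1., 1.) and (2.,) in place and, since scale=True by default, divides every
# column by its ddof=1 standard deviation, here sqrt(2) for each column.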
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weights vectors. This can be done
with two algo. (a) the inner loop of the original NIPALS algo. or (b) a
SVD on residuals cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples in the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples in the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk u)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results that 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk u), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression. But slightly different than the CCA. This is mostly used
for modeling.
This implementation provides the same results that the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
relies in the fact that mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
evandromr/python_scitools | plotperiodogram.py | 1 | 1814 | #!/bin/env python
import numpy as np
import scipy.signal as ss
import astropy.io.fits as fits
import matplotlib.pyplot as plt
inpt = str(raw_input("File name: "))
lc = fits.open(inpt)
bin = float(raw_input("bin size (or camera resolution): "))
# Converting to a plain float64 array is necessary for scipy's
# lombscargle/periodogram functions
rate = np.array(lc[1].data["RATE"], dtype='float64')
time = np.array(lc[1].data["TIME"], dtype='float64')
time -= time.min()
# Exclue NaN values -------------------------
print ''
print 'Excluding nan and negative values...'
print ''
exclude = []
for i in xrange(len(rate)):
if rate[i] > 0:
pass
else:
exclude.append(i)
exclude = np.array(exclude)
nrate = np.delete(rate, exclude)
ntime = np.delete(time, exclude)
# --------------------------------------------
# normalize count rate
nrate -= nrate.mean()
# sampling frequency set by the bin size (camera resolution)
freqmax = 1.0/bin
# The periodogram itself
f, p = ss.periodogram(nrate, fs=freqmax)#, nfft=1500)
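# ss.periodogram returns one-sided frequencies from 0 up to the Nyquist
# frequency freqmax / 2; the dominant period is read off below as
# 1.0 / f[np.argmax(p)].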
print 'TIME =', max(time)
# Plot lightcurve on top panel
#plt.subplot(2, 1, 1)
#plt.plot(ntime, nrate, 'bo-')
#plt.xlabel('Time [s]', fontsize=12)
#plt.ylabel('Normalized Count Rate [counts/s]', fontsize=12)
# Plot powerspectrum on bottom panel
#plt.subplot(2, 1, 2)
#plt.plot(f, p, 'b.-', label='f = {0:.3e}'.format(f[np.argmax(p)]))
#plt.xlabel('Frequency [Hz]', fontsize=12)
#plt.ylabel('Power', fontsize=12)
#plt.legend(loc='best')
# show plot
#plt.show()
#plt.plot(f, p)
plt.plot(f, p, linestyle='steps', label='T$_{{peak}}$ = {0:.3f} s'.format(1.0/f[np.argmax(p)]))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power')
plt.xlim(min(f), max(f))
plt.legend(loc='best', frameon=False)
plt.savefig("periodogram.pdf", orientation='landscape', papertype='a4',
format='pdf', bbox_inches='tight')
plt.show()
| mit |
JavierGarciaD/athena | athena/testcases/performance_test.py | 1 | 2286 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# testcases.performance_test.py
'''
@since: 2014-11-25
@author: Javier Garcia
@contact: [email protected]
@summary: Tests for the performance module
'''
import unittest
import pandas as pd
import numpy as np
from performance import performance
import math
# pylint: disable=too-many-public-methods
# Nothing to do here, these are unittest's public test methods
class TestPerformance(unittest.TestCase):
"""
Test the performance measure
"""
def test_create_drowdown1(self):
"""
        Test 1: non-random series with 3 inflection points
"""
rows = 100
step = 1
values = [100]
inflection1 = 0.15
inflection2 = 0.50
inflection3 = 0.90
for each_row in range(rows - 1):
if each_row < (rows * inflection1):
values.append(values[-1] - step)
            elif each_row >= rows * inflection1 and \
                    each_row < rows * inflection2:
                values.append(values[-1] + step)
            elif each_row >= rows * inflection2 and \
                    each_row < rows * inflection3:
values.append(values[-1] - step)
elif each_row >= rows * inflection3:
values.append(values[-1] + step)
pnl = pd.Series(values, index=np.arange(rows))
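        # The series built above falls 100 -> 85, rises to 120, falls to 80
        # and partially recovers to 89, so the largest peak-to-trough drop is
        # 120 - 80 = 40 and the equity stays under the 120 high-water mark for
        # the remaining 49 periods, matching the expected (40.0, 49.0) below.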
result = performance.create_drawdowns(pnl)
expected = (40.0, 49.0)
self.assertEqual(result, expected, 'Drawdown error calculation')
def test_create_sharpe_ratio0(self):
"""
test for correct calculations
"""
simm = [10, 9, 11, 10, 12, 11, 13, 12, 14, 13, 15]
prices = pd.Series(simm)
returns = prices.pct_change()
result = performance.create_sharpe_ratio(returns)
expected = 5.85973697
self.assertAlmostEqual(result, expected)
def test_create_sharpe_ratio1(self):
"""
        test for a constant (zero-volatility) series
"""
simm = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
prices = pd.Series(simm)
returns = prices.pct_change()
result = performance.create_sharpe_ratio(returns)
self.assertTrue(math.isnan(result))
| gpl-3.0 |
kirangonella/BuildingMachineLearningSystemsWithPython | ch02/figure1.py | 22 | 1199 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from matplotlib import pyplot as plt
# We load the data with load_iris from sklearn
from sklearn.datasets import load_iris
# load_iris returns an object with several fields
data = load_iris()
features = data.data
feature_names = data.feature_names
target = data.target
target_names = data.target_names
fig,axes = plt.subplots(2, 3)
pairs = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
# Set up 3 different pairs of (color, marker)
color_markers = [
('r', '>'),
('g', 'o'),
('b', 'x'),
]
for i, (p0, p1) in enumerate(pairs):
ax = axes.flat[i]
for t in range(3):
# Use a different color/marker for each class `t`
c,marker = color_markers[t]
ax.scatter(features[target == t, p0], features[
target == t, p1], marker=marker, c=c)
ax.set_xlabel(feature_names[p0])
ax.set_ylabel(feature_names[p1])
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
fig.savefig('figure1.png')
| mit |
themrmax/scikit-learn | sklearn/linear_model/omp.py | 8 | 31640 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
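# Illustrative usage sketch (assumes the public ``orthogonal_mp`` defined
# later in this module; the example data is made up):
#
#     X = np.random.randn(50, 100)
#     X /= np.sqrt(np.sum(X ** 2, axis=0))   # unit-norm columns, as assumed
#     y = X[:, :3].sum(axis=1)
#     coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
#
# which returns a coefficient vector with at most 3 non-zero entries.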
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
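# A minimal sketch (hypothetical helper) checking that the Gram variant selects
# the same atoms as _cholesky_omp when fed Gram = X.T X and Xy = X.T y, which
# is exactly how orthogonal_mp_gram uses it further below.
def _example_gram_omp():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 8)
    X /= np.sqrt(np.sum(X ** 2, axis=0))       # unit-norm columns, as assumed
    y = 2.0 * X[:, 3] - 1.5 * X[:, 6]
    _, idx_x, _ = _cholesky_omp(X, y, 2, None)
    _, idx_g, _ = _gram_omp(np.dot(X.T, X), np.dot(X.T, y), 2)
    return sorted(idx_x) == sorted(idx_g)      # expected: True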
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
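# A minimal usage sketch for the public entry point (hypothetical helper): a
# signal built from three dictionary atoms is recovered by asking for three
# non-zero coefficients.
def _example_orthogonal_mp():
    rng = np.random.RandomState(42)
    X = rng.randn(50, 20)
    X /= np.sqrt(np.sum(X ** 2, axis=0))       # columns assumed to have unit norm
    true_coef = np.zeros(20)
    true_coef[[2, 7, 11]] = [1.0, -2.0, 0.5]
    y = np.dot(X, true_coef)
    coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
    return np.flatnonzero(coef)                # ideally array([2, 7, 11])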
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
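# A minimal sketch (hypothetical helper) showing that the Gram entry point
# matches orthogonal_mp when given the precomputed products G = X.T X and
# Xy = X.T y, which is what orthogonal_mp itself does when precompute is True.
def _example_orthogonal_mp_gram():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 10)
    X /= np.sqrt(np.sum(X ** 2, axis=0))
    y = X[:, 1] - 0.5 * X[:, 8]
    coef_dense = orthogonal_mp(X, y, n_nonzero_coefs=2)
    coef_gram = orthogonal_mp_gram(np.dot(X.T, X), np.dot(X.T, y),
                                   n_nonzero_coefs=2)
    return np.allclose(coef_dense, coef_gram)  # expected: True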
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_offset, y_offset, X_scale)
return self
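# A minimal estimator-style usage sketch (hypothetical helper): fit on noisy
# data generated from a 3-sparse weight vector and inspect the recovered
# support and the number of OMP steps taken.
def _example_omp_estimator():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 15)
    w = np.zeros(15)
    w[[0, 4, 9]] = [3.0, -1.0, 2.0]
    y = np.dot(X, w) + 0.01 * rng.randn(100)
    reg = OrthogonalMatchingPursuit(n_nonzero_coefs=3).fit(X, y)
    return np.flatnonzero(reg.coef_), reg.n_iter_   # typically [0, 4, 9] and 3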
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
    residues : array, shape (max_features, n_samples)
        Residues of the prediction on the test data, with one row per number
        of active features along the forward path
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, ensure_min_features=2,
estimator=self)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv.split(X))
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
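# A minimal usage sketch (hypothetical helper) for the cross-validated variant:
# the sparsity level is chosen by minimizing the mean squared prediction error
# across folds and exposed as n_nonzero_coefs_ on the fitted estimator.
def _example_omp_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 20)
    w = np.zeros(20)
    w[[1, 5, 12]] = [2.0, -3.0, 1.5]
    y = np.dot(X, w) + 0.05 * rng.randn(200)
    reg = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
    return reg.n_nonzero_coefs_                # typically 3 for this toy problem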
| bsd-3-clause |
shikhar413/openmc | openmc/filter.py | 6 | 63054 | from abc import ABCMeta
from collections import OrderedDict
from collections.abc import Iterable
import hashlib
from itertools import product
from numbers import Real, Integral
from xml.etree import ElementTree as ET
import numpy as np
import pandas as pd
import openmc
import openmc.checkvalue as cv
from .cell import Cell
from .material import Material
from .mixin import IDManagerMixin
from .surface import Surface
from .universe import Universe
_FILTER_TYPES = (
'universe', 'material', 'cell', 'cellborn', 'surface', 'mesh', 'energy',
'energyout', 'mu', 'polar', 'azimuthal', 'distribcell', 'delayedgroup',
'energyfunction', 'cellfrom', 'legendre', 'spatiallegendre',
'sphericalharmonics', 'zernike', 'zernikeradial', 'particle', 'cellinstance'
)
_CURRENT_NAMES = (
'x-min out', 'x-min in', 'x-max out', 'x-max in',
'y-min out', 'y-min in', 'y-max out', 'y-max in',
'z-min out', 'z-min in', 'z-max out', 'z-max in'
)
_PARTICLES = {'neutron', 'photon', 'electron', 'positron'}
class FilterMeta(ABCMeta):
"""Metaclass for filters that ensures class names are appropriate."""
def __new__(cls, name, bases, namespace, **kwargs):
# Check the class name.
required_suffix = 'Filter'
if not name.endswith(required_suffix):
raise ValueError("All filter class names must end with 'Filter'")
# Create a 'short_name' attribute that removes the 'Filter' suffix.
namespace['short_name'] = name[:-len(required_suffix)]
# Subclass methods can sort of inherit the docstring of parent class
# methods. If a function is defined without a docstring, most (all?)
# Python interpreters will search through the parent classes to see if
# there is a docstring for a function with the same name, and they will
# use that docstring. However, Sphinx does not have that functionality.
# This chunk of code handles this docstring inheritance manually so that
# the autodocumentation will pick it up.
if name != required_suffix:
# Look for newly-defined functions that were also in Filter.
for func_name in namespace:
if func_name in Filter.__dict__:
# Inherit the docstring from Filter if not defined.
if isinstance(namespace[func_name],
(classmethod, staticmethod)):
new_doc = namespace[func_name].__func__.__doc__
old_doc = Filter.__dict__[func_name].__func__.__doc__
if new_doc is None and old_doc is not None:
namespace[func_name].__func__.__doc__ = old_doc
else:
new_doc = namespace[func_name].__doc__
old_doc = Filter.__dict__[func_name].__doc__
if new_doc is None and old_doc is not None:
namespace[func_name].__doc__ = old_doc
# Make the class.
return super().__new__(cls, name, bases, namespace, **kwargs)
def _repeat_and_tile(bins, repeat_factor, data_size):
filter_bins = np.repeat(bins, repeat_factor)
tile_factor = data_size // len(filter_bins)
return np.tile(filter_bins, tile_factor)
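# A minimal sketch (hypothetical helper) of what _repeat_and_tile produces:
# each bin label is repeated `stride` times and the pattern is tiled until it
# spans `data_size` tally rows.
def _example_repeat_and_tile():
    # For bins [1, 2], stride 2 and data_size 8:
    # array([1, 1, 2, 2, 1, 1, 2, 2])
    return _repeat_and_tile(np.array([1, 2]), 2, 8)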
class Filter(IDManagerMixin, metaclass=FilterMeta):
"""Tally modifier that describes phase-space and other characteristics.
Parameters
----------
bins : Integral or Iterable of Integral or Iterable of Real
The bins for the filter. This takes on different meaning for different
        filters. See the docstrings for subclasses of this filter or the online
documentation for more details.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Integral or Iterable of Integral or Iterable of Real
The bins for the filter
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
next_id = 1
used_ids = set()
def __init__(self, bins, filter_id=None):
self.bins = bins
self.id = filter_id
def __eq__(self, other):
if type(self) is not type(other):
return False
elif len(self.bins) != len(other.bins):
return False
else:
return np.allclose(self.bins, other.bins)
def __gt__(self, other):
if type(self) is not type(other):
if self.short_name in _FILTER_TYPES and \
other.short_name in _FILTER_TYPES:
delta = _FILTER_TYPES.index(self.short_name) - \
_FILTER_TYPES.index(other.short_name)
return delta > 0
else:
return False
else:
return max(self.bins) > max(other.bins)
def __lt__(self, other):
return not self > other
def __hash__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tBins', self.bins)
return hash(string)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tBins', self.bins)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@classmethod
def _recursive_subclasses(cls):
"""Return all subclasses and their subclasses, etc."""
all_subclasses = []
for subclass in cls.__subclasses__():
all_subclasses.append(subclass)
all_subclasses.extend(subclass._recursive_subclasses())
return all_subclasses
@classmethod
def from_hdf5(cls, group, **kwargs):
"""Construct a new Filter instance from HDF5 data.
Parameters
----------
group : h5py.Group
HDF5 group to read from
Keyword arguments
-----------------
meshes : dict
Dictionary mapping integer IDs to openmc.MeshBase objects. Only
used for openmc.MeshFilter objects.
"""
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
# If the HDF5 'type' variable matches this class's short_name, then
# there is no overriden from_hdf5 method. Pass the bins to __init__.
if group['type'][()].decode() == cls.short_name.lower():
out = cls(group['bins'][()], filter_id=filter_id)
out._num_bins = group['n_bins'][()]
return out
# Search through all subclasses and find the one matching the HDF5
# 'type'. Call that class's from_hdf5 method.
for subclass in cls._recursive_subclasses():
if group['type'][()].decode() == subclass.short_name.lower():
return subclass.from_hdf5(group, **kwargs)
raise ValueError("Unrecognized Filter class: '"
+ group['type'][()].decode() + "'")
@property
def bins(self):
return self._bins
@bins.setter
def bins(self, bins):
self.check_bins(bins)
self._bins = bins
@property
def num_bins(self):
return len(self.bins)
def check_bins(self, bins):
"""Make sure given bins are valid for this filter.
Raises
------
TypeError
ValueError
"""
pass
def to_xml_element(self):
"""Return XML Element representing the Filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing filter data
"""
element = ET.Element('filter')
element.set('id', str(self.id))
element.set('type', self.short_name.lower())
subelement = ET.SubElement(element, 'bins')
subelement.text = ' '.join(str(b) for b in self.bins)
return element
def can_merge(self, other):
"""Determine if filter can be merged with another.
Parameters
----------
other : openmc.Filter
Filter to compare with
Returns
-------
bool
Whether the filter can be merged
"""
return type(self) is type(other)
def merge(self, other):
"""Merge this filter with another.
Parameters
----------
other : openmc.Filter
Filter to merge with
Returns
-------
merged_filter : openmc.Filter
Filter resulting from the merge
"""
if not self.can_merge(other):
msg = 'Unable to merge "{0}" with "{1}" '.format(
type(self), type(other))
raise ValueError(msg)
# Merge unique filter bins
merged_bins = np.concatenate((self.bins, other.bins))
merged_bins = np.unique(merged_bins, axis=0)
# Create a new filter with these bins and a new auto-generated ID
return type(self)(merged_bins)
def is_subset(self, other):
"""Determine if another filter is a subset of this filter.
If all of the bins in the other filter are included as bins in this
filter, then it is a subset of this filter.
Parameters
----------
other : openmc.Filter
The filter to query as a subset of this filter
Returns
-------
bool
Whether or not the other filter is a subset of this filter
"""
if type(self) is not type(other):
return False
for b in other.bins:
if b not in self.bins:
return False
return True
def get_bin_index(self, filter_bin):
"""Returns the index in the Filter for some bin.
Parameters
----------
filter_bin : int or tuple
The bin is the integer ID for 'material', 'surface', 'cell',
'cellborn', and 'universe' Filters. The bin is an integer for the
cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of
floats for 'energy' and 'energyout' filters corresponding to the
energy boundaries of the bin of interest. The bin is an (x,y,z)
3-tuple for 'mesh' filters corresponding to the mesh cell of
interest.
Returns
-------
filter_index : int
The index in the Tally data array for this filter bin.
"""
if filter_bin not in self.bins:
msg = 'Unable to get the bin index for Filter since "{0}" ' \
'is not one of the bins'.format(filter_bin)
raise ValueError(msg)
if isinstance(self.bins, np.ndarray):
return np.where(self.bins == filter_bin)[0][0]
else:
return self.bins.index(filter_bin)
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Keyword arguments
-----------------
paths : bool
Only used for DistribcellFilter. If True (default), expand
distribcell indices into multi-index columns describing the path
to that distribcell through the CSG tree. NOTE: This option assumes
that all distribcell paths are of the same length and do not have
the same universes and cells but different lattice cell indices.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with columns of strings that characterize the
filter's bins. The number of rows in the DataFrame is the same as
the total number of bins in the corresponding tally, with the filter
bin appropriately tiled to map to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
filter_bins = np.repeat(self.bins, stride)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
df = pd.concat([df, pd.DataFrame(
{self.short_name.lower(): filter_bins})])
return df
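# A minimal sketch (hypothetical helper) of bin bookkeeping on the base class,
# using the CellFilter subclass defined further below with plain integer IDs.
def _example_filter_merging():
    f_a = CellFilter([1, 2])
    f_b = CellFilter([2, 3])
    merged = f_a.merge(f_b) if f_a.can_merge(f_b) else None
    # merged.bins -> array([1, 2, 3]); f_a contains every bin of CellFilter([2])
    return merged.bins, f_a.is_subset(CellFilter([2]))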
class WithIDFilter(Filter):
"""Abstract parent for filters of types with IDs (Cell, Material, etc.)."""
def __init__(self, bins, filter_id=None):
bins = np.atleast_1d(bins)
# Make sure bins are either integers or appropriate objects
cv.check_iterable_type('filter bins', bins,
(Integral, self.expected_type))
# Extract ID values
bins = np.array([b if isinstance(b, Integral) else b.id
for b in bins])
super().__init__(bins, filter_id)
def check_bins(self, bins):
# Check the bin values.
for edge in bins:
cv.check_greater_than('filter bin', edge, 0, equality=True)
class UniverseFilter(WithIDFilter):
"""Bins tally event locations based on the Universe they occured in.
Parameters
----------
bins : openmc.Universe, int, or iterable thereof
The Universes to tally. Either openmc.Universe objects or their
Integral ID numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
openmc.Universe IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Universe
class MaterialFilter(WithIDFilter):
"""Bins tally event locations based on the Material they occured in.
Parameters
----------
bins : openmc.Material, Integral, or iterable thereof
The Materials to tally. Either openmc.Material objects or their
Integral ID numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
openmc.Material IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Material
class CellFilter(WithIDFilter):
"""Bins tally event locations based on the Cell they occured in.
Parameters
----------
bins : openmc.Cell, int, or iterable thereof
The cells to tally. Either openmc.Cell objects or their ID numbers can
be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
openmc.Cell IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Cell
class CellFromFilter(WithIDFilter):
"""Bins tally on which Cell the neutron came from.
Parameters
----------
bins : openmc.Cell, Integral, or iterable thereof
The Cell(s) to tally. Either openmc.Cell objects or their
Integral ID numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Integral or Iterable of Integral
openmc.Cell IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Cell
class CellbornFilter(WithIDFilter):
"""Bins tally events based on which Cell the neutron was born in.
Parameters
----------
bins : openmc.Cell, Integral, or iterable thereof
The birth Cells to tally. Either openmc.Cell objects or their
Integral ID numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
openmc.Cell IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Cell
class CellInstanceFilter(Filter):
"""Bins tally events based on which cell instance a particle is in.
This filter is similar to :class:`DistribcellFilter` but allows one to
select particular instances to be tallied (instead of obtaining *all*
instances by default) and allows instances from different cells to be
specified in a single filter.
.. versionadded:: 0.12
Parameters
----------
bins : iterable of 2-tuples or numpy.ndarray
The cell instances to tally, given as 2-tuples. For the first value in
the tuple, either openmc.Cell objects or their integral ID numbers can
be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : numpy.ndarray
2D numpy array of cell IDs and instances
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
See Also
--------
DistribcellFilter
"""
def __init__(self, bins, filter_id=None):
self.bins = bins
self.id = filter_id
@Filter.bins.setter
def bins(self, bins):
pairs = np.empty((len(bins), 2), dtype=int)
for i, (cell, instance) in enumerate(bins):
cv.check_type('cell', cell, (openmc.Cell, Integral))
cv.check_type('instance', instance, Integral)
pairs[i, 0] = cell if isinstance(cell, Integral) else cell.id
pairs[i, 1] = instance
self._bins = pairs
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Returns
-------
pandas.DataFrame
A Pandas DataFrame with a multi-index column for the cell instance.
The number of rows in the DataFrame is the same as the total number
of bins in the corresponding tally, with the filter bin appropriately
tiled to map to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Repeat and tile bins as necessary to account for other filters.
bins = np.repeat(self.bins, stride, axis=0)
tile_factor = data_size // len(bins)
bins = np.tile(bins, (tile_factor, 1))
columns = pd.MultiIndex.from_product([[self.short_name.lower()],
['cell', 'instance']])
return pd.DataFrame(bins, columns=columns)
def to_xml_element(self):
"""Return XML Element representing the Filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing filter data
"""
element = ET.Element('filter')
element.set('id', str(self.id))
element.set('type', self.short_name.lower())
subelement = ET.SubElement(element, 'bins')
subelement.text = ' '.join(str(i) for i in self.bins.ravel())
return element
class SurfaceFilter(WithIDFilter):
"""Filters particles by surface crossing
Parameters
----------
bins : openmc.Surface, int, or iterable of Integral
The surfaces to tally over. Either openmc.Surface objects or their ID
numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
The surfaces to tally over. Either openmc.Surface objects or their ID
numbers can be used.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Surface
class ParticleFilter(Filter):
"""Bins tally events based on the Particle type.
Parameters
----------
bins : str, or iterable of str
The particles to tally represented as strings ('neutron', 'photon',
'electron', 'positron').
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
The Particles to tally
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
def __eq__(self, other):
if type(self) is not type(other):
return False
elif len(self.bins) != len(other.bins):
return False
else:
return np.all(self.bins == other.bins)
__hash__ = Filter.__hash__
@Filter.bins.setter
def bins(self, bins):
bins = np.atleast_1d(bins)
cv.check_iterable_type('filter bins', bins, str)
for edge in bins:
cv.check_value('filter bin', edge, _PARTICLES)
self._bins = bins
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'][()].decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
                             + group['type'][()].decode() + "' instead")
particles = [b.decode() for b in group['bins'][()]]
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
return cls(particles, filter_id=filter_id)
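# A minimal sketch (hypothetical helper): a ParticleFilter stores the particle
# type strings it is given and rejects anything outside _PARTICLES.
def _example_particle_filter():
    f = ParticleFilter(['neutron', 'photon'])
    return f.bins                              # array(['neutron', 'photon'], ...)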
class MeshFilter(Filter):
"""Bins tally event locations onto a regular, rectangular mesh.
Parameters
----------
mesh : openmc.MeshBase
The mesh object that events will be tallied onto
filter_id : int
Unique identifier for the filter
Attributes
----------
mesh : openmc.MeshBase
The mesh object that events will be tallied onto
id : int
Unique identifier for the filter
bins : list of tuple
A list of mesh indices for each filter bin, e.g. [(1, 1, 1), (2, 1, 1),
...]
num_bins : Integral
The number of filter bins
"""
def __init__(self, mesh, filter_id=None):
self.mesh = mesh
self.id = filter_id
def __hash__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tMesh ID', self.mesh.id)
return hash(string)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tMesh ID', self.mesh.id)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'][()].decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
                             + group['type'][()].decode() + "' instead")
if 'meshes' not in kwargs:
raise ValueError(cls.__name__ + " requires a 'meshes' keyword "
"argument.")
mesh_id = group['bins'][()]
mesh_obj = kwargs['meshes'][mesh_id]
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
out = cls(mesh_obj, filter_id=filter_id)
return out
@property
def mesh(self):
return self._mesh
@mesh.setter
def mesh(self, mesh):
cv.check_type('filter mesh', mesh, openmc.MeshBase)
self._mesh = mesh
if isinstance(mesh, openmc.UnstructuredMesh):
if mesh.volumes is None:
self.bins = []
else:
self.bins = list(range(len(mesh.volumes)))
else:
self.bins = list(mesh.indices)
def can_merge(self, other):
# Mesh filters cannot have more than one bin
return False
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Returns
-------
pandas.DataFrame
A Pandas DataFrame with three columns describing the x,y,z mesh
cell indices corresponding to each filter bin. The number of rows
in the DataFrame is the same as the total number of bins in the
corresponding tally, with the filter bin appropriately tiled to map
to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
# Initialize dictionary to build Pandas Multi-index column
filter_dict = {}
# Append mesh ID as outermost index of multi-index
mesh_key = 'mesh {}'.format(self.mesh.id)
# Find mesh dimensions - use 3D indices for simplicity
n_dim = len(self.mesh.dimension)
if n_dim == 3:
nx, ny, nz = self.mesh.dimension
elif n_dim == 2:
nx, ny = self.mesh.dimension
nz = 1
else:
            nx, = self.mesh.dimension
ny = nz = 1
# Generate multi-index sub-column for x-axis
filter_dict[mesh_key, 'x'] = _repeat_and_tile(
np.arange(1, nx + 1), stride, data_size)
# Generate multi-index sub-column for y-axis
filter_dict[mesh_key, 'y'] = _repeat_and_tile(
np.arange(1, ny + 1), nx * stride, data_size)
# Generate multi-index sub-column for z-axis
filter_dict[mesh_key, 'z'] = _repeat_and_tile(
np.arange(1, nz + 1), nx * ny * stride, data_size)
# Initialize a Pandas DataFrame from the mesh dictionary
df = pd.concat([df, pd.DataFrame(filter_dict)])
return df
def to_xml_element(self):
"""Return XML Element representing the Filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing filter data
"""
element = super().to_xml_element()
element[0].text = str(self.mesh.id)
return element
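# A minimal sketch (hypothetical helper) of pairing a MeshFilter with a regular
# mesh; the RegularMesh attributes used here (dimension, lower_left,
# upper_right) are assumed from the usual openmc API and written from memory.
def _example_mesh_filter():
    mesh = openmc.RegularMesh()
    mesh.dimension = [2, 2, 1]
    mesh.lower_left = [-1.0, -1.0, -1.0]
    mesh.upper_right = [1.0, 1.0, 1.0]
    f = MeshFilter(mesh)
    # One bin per mesh element, stored as (x, y, z) index tuples
    return f.num_bins                          # expected: 4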
class MeshSurfaceFilter(MeshFilter):
"""Filter events by surface crossings on a regular, rectangular mesh.
Parameters
----------
mesh : openmc.MeshBase
The mesh object that events will be tallied onto
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Integral
The mesh ID
mesh : openmc.MeshBase
The mesh object that events will be tallied onto
id : int
Unique identifier for the filter
bins : list of tuple
A list of mesh indices / surfaces for each filter bin, e.g. [(1, 1,
'x-min out'), (1, 1, 'x-min in'), ...]
num_bins : Integral
The number of filter bins
"""
@MeshFilter.mesh.setter
def mesh(self, mesh):
cv.check_type('filter mesh', mesh, openmc.MeshBase)
self._mesh = mesh
# Take the product of mesh indices and current names
n_dim = mesh.n_dimension
self.bins = [mesh_tuple + (surf,) for mesh_tuple, surf in
product(mesh.indices, _CURRENT_NAMES[:4*n_dim])]
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Returns
-------
pandas.DataFrame
A Pandas DataFrame with three columns describing the x,y,z mesh
cell indices corresponding to each filter bin. The number of rows
in the DataFrame is the same as the total number of bins in the
corresponding tally, with the filter bin appropriately tiled to map
to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
# Initialize dictionary to build Pandas Multi-index column
filter_dict = {}
# Append mesh ID as outermost index of multi-index
mesh_key = 'mesh {}'.format(self.mesh.id)
# Find mesh dimensions - use 3D indices for simplicity
n_surfs = 4 * len(self.mesh.dimension)
if len(self.mesh.dimension) == 3:
nx, ny, nz = self.mesh.dimension
elif len(self.mesh.dimension) == 2:
nx, ny = self.mesh.dimension
nz = 1
else:
            nx, = self.mesh.dimension
ny = nz = 1
# Generate multi-index sub-column for x-axis
filter_dict[mesh_key, 'x'] = _repeat_and_tile(
np.arange(1, nx + 1), n_surfs * stride, data_size)
# Generate multi-index sub-column for y-axis
if len(self.mesh.dimension) > 1:
filter_dict[mesh_key, 'y'] = _repeat_and_tile(
np.arange(1, ny + 1), n_surfs * nx * stride, data_size)
# Generate multi-index sub-column for z-axis
if len(self.mesh.dimension) > 2:
filter_dict[mesh_key, 'z'] = _repeat_and_tile(
np.arange(1, nz + 1), n_surfs * nx * ny * stride, data_size)
# Generate multi-index sub-column for surface
filter_dict[mesh_key, 'surf'] = _repeat_and_tile(
_CURRENT_NAMES[:n_surfs], stride, data_size)
# Initialize a Pandas DataFrame from the mesh dictionary
return pd.concat([df, pd.DataFrame(filter_dict)])
class RealFilter(Filter):
"""Tally modifier that describes phase-space and other characteristics
Parameters
----------
values : iterable of float
A list of values for which each successive pair constitutes a range of
values for a single bin
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
values for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of values indicating a
filter bin range
num_bins : int
The number of filter bins
"""
def __init__(self, values, filter_id=None):
self.values = np.asarray(values)
self.bins = np.vstack((self.values[:-1], self.values[1:])).T
self.id = filter_id
def __gt__(self, other):
if type(self) is type(other):
# Compare largest/smallest bin edges in filters
# This logic is used when merging tallies with real filters
return self.values[0] >= other.values[-1]
else:
return super().__gt__(other)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tValues', self.values)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@Filter.bins.setter
def bins(self, bins):
Filter.bins.__set__(self, np.asarray(bins))
def check_bins(self, bins):
for v0, v1 in bins:
# Values should be real
cv.check_type('filter value', v0, Real)
cv.check_type('filter value', v1, Real)
# Make sure that each tuple has values that are increasing
if v1 < v0:
raise ValueError('Values {} and {} appear to be out of order'
.format(v0, v1))
for pair0, pair1 in zip(bins[:-1], bins[1:]):
# Successive pairs should be ordered
if pair1[1] < pair0[1]:
raise ValueError('Values {} and {} appear to be out of order'
.format(pair1[1], pair0[1]))
def can_merge(self, other):
if type(self) is not type(other):
return False
if self.bins[0, 0] == other.bins[-1][1]:
# This low edge coincides with other's high edge
return True
elif self.bins[-1][1] == other.bins[0, 0]:
# This high edge coincides with other's low edge
return True
else:
return False
def merge(self, other):
if not self.can_merge(other):
msg = 'Unable to merge "{0}" with "{1}" ' \
'filters'.format(type(self), type(other))
raise ValueError(msg)
# Merge unique filter bins
merged_values = np.concatenate((self.values, other.values))
merged_values = np.unique(merged_values)
# Create a new filter with these bins and a new auto-generated ID
return type(self)(sorted(merged_values))
def is_subset(self, other):
"""Determine if another filter is a subset of this filter.
If all of the bins in the other filter are included as bins in this
filter, then it is a subset of this filter.
Parameters
----------
other : openmc.Filter
The filter to query as a subset of this filter
Returns
-------
bool
Whether or not the other filter is a subset of this filter
"""
if type(self) is not type(other):
return False
elif self.num_bins != other.num_bins:
return False
else:
return np.allclose(self.values, other.values)
def get_bin_index(self, filter_bin):
i = np.where(self.bins[:, 1] == filter_bin[1])[0]
if len(i) == 0:
msg = 'Unable to get the bin index for Filter since "{0}" ' \
'is not one of the bins'.format(filter_bin)
raise ValueError(msg)
else:
return i[0]
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Returns
-------
pandas.DataFrame
A Pandas DataFrame with one column of the lower energy bound and one
column of upper energy bound for each filter bin. The number of
rows in the DataFrame is the same as the total number of bins in the
corresponding tally, with the filter bin appropriately tiled to map
to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
# Extract the lower and upper energy bounds, then repeat and tile
# them as necessary to account for other filters.
lo_bins = np.repeat(self.bins[:, 0], stride)
hi_bins = np.repeat(self.bins[:, 1], stride)
tile_factor = data_size // len(lo_bins)
lo_bins = np.tile(lo_bins, tile_factor)
hi_bins = np.tile(hi_bins, tile_factor)
# Add the new energy columns to the DataFrame.
if hasattr(self, 'units'):
units = ' [{}]'.format(self.units)
else:
units = ''
df.loc[:, self.short_name.lower() + ' low' + units] = lo_bins
df.loc[:, self.short_name.lower() + ' high' + units] = hi_bins
return df
def to_xml_element(self):
"""Return XML Element representing the Filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing filter data
"""
element = super().to_xml_element()
element[0].text = ' '.join(str(x) for x in self.values)
return element
class EnergyFilter(RealFilter):
"""Bins tally events based on incident particle energy.
Parameters
----------
values : Iterable of Real
A list of values for which each successive pair constitutes a range of
energies in [eV] for a single bin
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
energies in [eV] for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of energies in [eV]
for a single filter bin
num_bins : int
The number of filter bins
"""
units = 'eV'
def get_bin_index(self, filter_bin):
        # Use the upper bound of the (low, high) pair to find the bin index
deltas = np.abs(self.bins[:, 1] - filter_bin[1]) / filter_bin[1]
min_delta = np.min(deltas)
if min_delta < 1E-3:
return deltas.argmin()
else:
msg = 'Unable to get the bin index for Filter since "{0}" ' \
'is not one of the bins'.format(filter_bin)
raise ValueError(msg)
def check_bins(self, bins):
super().check_bins(bins)
for v0, v1 in bins:
cv.check_greater_than('filter value', v0, 0., equality=True)
cv.check_greater_than('filter value', v1, 0., equality=True)
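# A minimal sketch (hypothetical helper): an EnergyFilter is built from bin
# *edges* in eV, so N + 1 edges produce N (low, high) bins.
def _example_energy_filter():
    edges = np.logspace(-2, 7, num=10)         # 10 edges -> 9 energy bins in eV
    f = EnergyFilter(edges)
    return f.bins.shape                        # (9, 2); f.num_bins is 9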
class EnergyoutFilter(EnergyFilter):
"""Bins tally events based on outgoing particle energy.
Parameters
----------
values : Iterable of Real
A list of values for which each successive pair constitutes a range of
energies in [eV] for a single bin
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
energies in [eV] for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of energies in [eV]
for a single filter bin
num_bins : int
The number of filter bins
"""
def _path_to_levels(path):
"""Convert distribcell path to list of levels
Parameters
----------
path : str
Distribcell path
Returns
-------
list
List of levels in path
"""
# Split path into universes/cells/lattices
path_items = path.split('->')
# Pair together universe and cell information from the same level
idx = [i for i, item in enumerate(path_items) if item.startswith('u')]
for i in reversed(idx):
univ_id = int(path_items.pop(i)[1:])
cell_id = int(path_items.pop(i)[1:])
path_items.insert(i, ('universe', univ_id, cell_id))
# Reformat lattice into tuple
idx = [i for i, item in enumerate(path_items) if isinstance(item, str)]
for i in idx:
item = path_items.pop(i)[1:-1]
lat_id, lat_xyz = item.split('(')
lat_id = int(lat_id)
lat_xyz = tuple(int(x) for x in lat_xyz.split(','))
path_items.insert(i, ('lattice', lat_id, lat_xyz))
return path_items
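# A minimal sketch (hypothetical helper) of the path format this parser
# expects: universe entries prefixed with 'u', cells with 'c', and lattice
# entries written like 'l4(1,2,1)'. The example string itself is made up.
def _example_path_to_levels():
    path = 'u0->c10->l4(1,2,1)->u3->c7'
    # -> [('universe', 0, 10), ('lattice', 4, (1, 2, 1)), ('universe', 3, 7)]
    return _path_to_levels(path)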
class DistribcellFilter(Filter):
"""Bins tally event locations on instances of repeated cells.
This filter provides a separate score for each unique instance of a repeated
cell in a geometry. Note that only one cell can be specified in this filter.
The related :class:`CellInstanceFilter` allows one to obtain scores for
particular cell instances as well as instances from different cells.
Parameters
----------
cell : openmc.Cell or Integral
The distributed cell to tally. Either an openmc.Cell or an Integral
cell ID number can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
An iterable with one element---the ID of the distributed Cell.
id : int
Unique identifier for the filter
num_bins : int
The number of filter bins
paths : list of str
The paths traversed through the CSG tree to reach each distribcell
instance (for 'distribcell' filters only)
See Also
--------
CellInstanceFilter
"""
def __init__(self, cell, filter_id=None):
self._paths = None
super().__init__(cell, filter_id)
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'][()].decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
                             + group['type'][()].decode() + "' instead")
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
out = cls(group['bins'][()], filter_id=filter_id)
out._num_bins = group['n_bins'][()]
return out
@property
def num_bins(self):
# Need to handle number of bins carefully -- for distribcell tallies, we
# need to know how many instances of the cell there are
return self._num_bins
@property
def paths(self):
return self._paths
@Filter.bins.setter
def bins(self, bins):
# Format the bins as a 1D numpy array.
bins = np.atleast_1d(bins)
# Make sure there is only 1 bin.
if not len(bins) == 1:
msg = 'Unable to add bins "{0}" to a DistribcellFilter since ' \
'only a single distribcell can be used per tally'.format(bins)
raise ValueError(msg)
# Check the type and extract the id, if necessary.
cv.check_type('distribcell bin', bins[0], (Integral, openmc.Cell))
if isinstance(bins[0], openmc.Cell):
bins = np.atleast_1d(bins[0].id)
self._bins = bins
@paths.setter
def paths(self, paths):
cv.check_iterable_type('paths', paths, str)
self._paths = paths
def can_merge(self, other):
# Distribcell filters cannot have more than one bin
return False
def get_bin_index(self, filter_bin):
# Filter bins for distribcells are indices of each unique placement of
# the Cell in the Geometry (consecutive integers starting at 0).
return filter_bin
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Keyword arguments
-----------------
paths : bool
If True (default), expand distribcell indices into multi-index
columns describing the path to that distribcell through the CSG
tree. NOTE: This option assumes that all distribcell paths are of
the same length and do not have the same universes and cells but
different lattice cell indices.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with columns describing distributed cells. The
dataframe will have either:
1. a single column with the cell instance IDs (without summary info)
2. separate columns for the cell IDs, universe IDs, and lattice IDs
and x,y,z cell indices corresponding to each (distribcell paths).
The number of rows in the DataFrame is the same as the total number
of bins in the corresponding tally, with the filter bin
appropriately tiled to map to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
level_df = None
paths = kwargs.setdefault('paths', True)
# Create Pandas Multi-index columns for each level in CSG tree
if paths:
# Distribcell paths require linked metadata from the Summary
if self.paths is None:
msg = 'Unable to construct distribcell paths since ' \
'the Summary is not linked to the StatePoint'
raise ValueError(msg)
# Make copy of array of distribcell paths to use in
# Pandas Multi-index column construction
num_offsets = len(self.paths)
paths = [_path_to_levels(p) for p in self.paths]
# Loop over CSG levels in the distribcell paths
num_levels = len(paths[0])
for i_level in range(num_levels):
# Use level key as first index in Pandas Multi-index column
level_key = 'level {}'.format(i_level + 1)
# Create a dictionary for this level for Pandas Multi-index
level_dict = OrderedDict()
# Use the first distribcell path to determine if level
# is a universe/cell or lattice level
path = paths[0]
if path[i_level][0] == 'lattice':
# Initialize prefix Multi-index keys
lat_id_key = (level_key, 'lat', 'id')
lat_x_key = (level_key, 'lat', 'x')
lat_y_key = (level_key, 'lat', 'y')
lat_z_key = (level_key, 'lat', 'z')
# Allocate NumPy arrays for each CSG level and
# each Multi-index column in the DataFrame
level_dict[lat_id_key] = np.empty(num_offsets)
level_dict[lat_x_key] = np.empty(num_offsets)
level_dict[lat_y_key] = np.empty(num_offsets)
if len(path[i_level][2]) == 3:
level_dict[lat_z_key] = np.empty(num_offsets)
else:
# Initialize prefix Multi-index keys
univ_key = (level_key, 'univ', 'id')
cell_key = (level_key, 'cell', 'id')
# Allocate NumPy arrays for each CSG level and
# each Multi-index column in the DataFrame
level_dict[univ_key] = np.empty(num_offsets)
level_dict[cell_key] = np.empty(num_offsets)
# Populate Multi-index arrays with all distribcell paths
for i, path in enumerate(paths):
level = path[i_level]
if level[0] == 'lattice':
# Assign entry to Lattice Multi-index column
level_dict[lat_id_key][i] = level[1]
level_dict[lat_x_key][i] = level[2][0]
level_dict[lat_y_key][i] = level[2][1]
if len(level[2]) == 3:
level_dict[lat_z_key][i] = level[2][2]
else:
# Assign entry to Universe, Cell Multi-index columns
level_dict[univ_key][i] = level[1]
level_dict[cell_key][i] = level[2]
# Tile the Multi-index columns
for level_key, level_bins in level_dict.items():
level_dict[level_key] = _repeat_and_tile(
level_bins, stride, data_size)
# Initialize a Pandas DataFrame from the level dictionary
if level_df is None:
level_df = pd.DataFrame(level_dict)
else:
level_df = pd.concat([level_df, pd.DataFrame(level_dict)],
axis=1)
# Create DataFrame column for distribcell instance IDs
# NOTE: This is performed regardless of whether the user
# requests Summary geometric information
filter_bins = _repeat_and_tile(
np.arange(self.num_bins), stride, data_size)
df = pd.DataFrame({self.short_name.lower() : filter_bins})
# Concatenate with DataFrame of distribcell instance IDs
if level_df is not None:
level_df = level_df.dropna(axis=1, how='all')
            level_df = level_df.astype(int)
df = pd.concat([level_df, df], axis=1)
return df
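# --- Illustrative sketch (not part of the OpenMC API) ------------------------
# The DataFrame construction above maps each filter bin onto the rows of the
# flattened tally results with a repeat-and-tile pattern.  The helper below is
# a hypothetical stand-in showing the assumed behaviour of the module-level
# `_repeat_and_tile` used above; its name and the example values are
# illustrative only.
def _demo_repeat_and_tile(bins, stride, data_size):
    """Repeat each bin `stride` times, then tile the result to `data_size`."""
    repeated = np.repeat(bins, stride)
    return np.tile(repeated, data_size // len(repeated))
# e.g. 3 bins with a stride of 2 mapped onto 12 tally rows gives
# [0 0 1 1 2 2 0 0 1 1 2 2]:
#     _demo_repeat_and_tile(np.arange(3), 2, 12)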
class MuFilter(RealFilter):
"""Bins tally events based on particle scattering angle.
Parameters
----------
values : int or Iterable of Real
        A grid of scattering angles into which events will be binned. Values
represent the cosine of the scattering angle. If an iterable is given,
the values will be used explicitly as grid points. If a single int is
given, the range [-1, 1] will be divided up equally into that number of
bins.
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
scattering angle cosines for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of scattering angle
cosines for a single filter bin
num_bins : Integral
The number of filter bins
"""
def __init__(self, values, filter_id=None):
if isinstance(values, Integral):
values = np.linspace(-1., 1., values + 1)
super().__init__(values, filter_id)
def check_bins(self, bins):
super().check_bins(bins)
for x in np.ravel(bins):
if not np.isclose(x, -1.):
cv.check_greater_than('filter value', x, -1., equality=True)
if not np.isclose(x, 1.):
cv.check_less_than('filter value', x, 1., equality=True)
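# --- Illustrative sketch ------------------------------------------------------
# When an integer is passed to MuFilter (and, below, to PolarFilter or
# AzimuthalFilter), the constructor expands it into equally spaced bin edges
# over the filter's natural range.  A hypothetical stand-alone check of that
# expansion (the helper name is illustrative only):
def _demo_equal_width_edges(n_bins, lo=-1., hi=1.):
    """Return n_bins + 1 equally spaced edges spanning [lo, hi]."""
    return np.linspace(lo, hi, n_bins + 1)
# e.g. _demo_equal_width_edges(4) -> [-1., -0.5, 0., 0.5, 1.], which is the
# `values` grid that MuFilter(4) builds above.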
class PolarFilter(RealFilter):
"""Bins tally events based on the incident particle's direction.
Parameters
----------
values : int or Iterable of Real
        A grid of polar angles into which events will be binned. Values represent
an angle in radians relative to the z-axis. If an iterable is given, the
values will be used explicitly as grid points. If a single int is given,
the range [0, pi] will be divided up equally into that number of bins.
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
polar angles in [rad] for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of polar angles for a
single filter bin
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
units = 'rad'
def __init__(self, values, filter_id=None):
if isinstance(values, Integral):
values = np.linspace(0., np.pi, values + 1)
super().__init__(values, filter_id)
def check_bins(self, bins):
super().check_bins(bins)
for x in np.ravel(bins):
if not np.isclose(x, 0.):
cv.check_greater_than('filter value', x, 0., equality=True)
if not np.isclose(x, np.pi):
cv.check_less_than('filter value', x, np.pi, equality=True)
class AzimuthalFilter(RealFilter):
"""Bins tally events based on the incident particle's direction.
Parameters
----------
values : int or Iterable of Real
        A grid of azimuthal angles into which events will be binned. Values
represent an angle in radians relative to the x-axis and perpendicular
to the z-axis. If an iterable is given, the values will be used
explicitly as grid points. If a single int is given, the range
[-pi, pi) will be divided up equally into that number of bins.
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
azimuthal angles in [rad] for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of azimuthal angles
for a single filter bin
num_bins : Integral
The number of filter bins
"""
units = 'rad'
def __init__(self, values, filter_id=None):
if isinstance(values, Integral):
values = np.linspace(-np.pi, np.pi, values + 1)
super().__init__(values, filter_id)
def check_bins(self, bins):
super().check_bins(bins)
for x in np.ravel(bins):
if not np.isclose(x, -np.pi):
cv.check_greater_than('filter value', x, -np.pi, equality=True)
if not np.isclose(x, np.pi):
cv.check_less_than('filter value', x, np.pi, equality=True)
class DelayedGroupFilter(Filter):
"""Bins fission events based on the produced neutron precursor groups.
Parameters
----------
bins : iterable of int
The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses
6 precursor groups so a tally with all groups will have bins =
[1, 2, 3, 4, 5, 6].
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : iterable of int
The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses
6 precursor groups so a tally with all groups will have bins =
[1, 2, 3, 4, 5, 6].
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
def check_bins(self, bins):
# Check the bin values.
for g in bins:
cv.check_greater_than('delayed group', g, 0)
class EnergyFunctionFilter(Filter):
"""Multiplies tally scores by an arbitrary function of incident energy.
The arbitrary function is described by a piecewise linear-linear
interpolation of energy and y values. Values outside of the given energy
range will be evaluated as zero.
Parameters
----------
energy : Iterable of Real
A grid of energy values in [eV]
y : iterable of Real
A grid of interpolant values in [eV]
filter_id : int
Unique identifier for the filter
Attributes
----------
energy : Iterable of Real
A grid of energy values in [eV]
y : iterable of Real
A grid of interpolant values in [eV]
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins (always 1 for this filter)
"""
def __init__(self, energy, y, filter_id=None):
self.energy = energy
self.y = y
self.id = filter_id
def __eq__(self, other):
if type(self) is not type(other):
return False
elif not all(self.energy == other.energy):
return False
else:
return all(self.y == other.y)
def __gt__(self, other):
if type(self) is not type(other):
if self.short_name in _FILTER_TYPES and \
other.short_name in _FILTER_TYPES:
delta = _FILTER_TYPES.index(self.short_name) - \
_FILTER_TYPES.index(other.short_name)
return delta > 0
else:
return False
else:
return False
def __lt__(self, other):
if type(self) is not type(other):
if self.short_name in _FILTER_TYPES and \
other.short_name in _FILTER_TYPES:
delta = _FILTER_TYPES.index(self.short_name) - \
_FILTER_TYPES.index(other.short_name)
return delta < 0
else:
return False
else:
return False
def __hash__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tEnergy', self.energy)
string += '{: <16}=\t{}\n'.format('\tInterpolant', self.y)
return hash(string)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tEnergy', self.energy)
string += '{: <16}=\t{}\n'.format('\tInterpolant', self.y)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'][()].decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
                             + group['type'][()].decode() + "' instead")
energy = group['energy'][()]
y = group['y'][()]
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
return cls(energy, y, filter_id=filter_id)
@classmethod
def from_tabulated1d(cls, tab1d):
"""Construct a filter from a Tabulated1D object.
Parameters
----------
tab1d : openmc.data.Tabulated1D
A linear-linear Tabulated1D object with only a single interpolation
region.
Returns
-------
EnergyFunctionFilter
"""
cv.check_type('EnergyFunctionFilter tab1d', tab1d,
openmc.data.Tabulated1D)
if tab1d.n_regions > 1:
raise ValueError('Only Tabulated1Ds with a single interpolation '
'region are supported')
if tab1d.interpolation[0] != 2:
            raise ValueError('Only linear-linear Tabulated1Ds are supported')
return cls(tab1d.x, tab1d.y)
@property
def energy(self):
return self._energy
@property
def y(self):
return self._y
@property
def bins(self):
raise AttributeError('EnergyFunctionFilters have no bins.')
@property
def num_bins(self):
return 1
@energy.setter
def energy(self, energy):
# Format the bins as a 1D numpy array.
energy = np.atleast_1d(energy)
# Make sure the values are Real and positive.
cv.check_type('filter energy grid', energy, Iterable, Real)
for E in energy:
cv.check_greater_than('filter energy grid', E, 0, equality=True)
self._energy = energy
@y.setter
def y(self, y):
# Format the bins as a 1D numpy array.
y = np.atleast_1d(y)
# Make sure the values are Real.
cv.check_type('filter interpolant values', y, Iterable, Real)
self._y = y
@bins.setter
def bins(self, bins):
raise RuntimeError('EnergyFunctionFilters have no bins.')
def to_xml_element(self):
"""Return XML Element representing the Filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing filter data
"""
element = ET.Element('filter')
element.set('id', str(self.id))
element.set('type', self.short_name.lower())
subelement = ET.SubElement(element, 'energy')
subelement.text = ' '.join(str(e) for e in self.energy)
subelement = ET.SubElement(element, 'y')
subelement.text = ' '.join(str(y) for y in self.y)
return element
def can_merge(self, other):
return False
def is_subset(self, other):
return self == other
def get_bin_index(self, filter_bin):
# This filter only has one bin. Always return 0.
return 0
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Returns
-------
pandas.DataFrame
A Pandas DataFrame with a column that is filled with a hash of this
filter. EnergyFunctionFilters have only 1 bin so the purpose of this
DataFrame column is to differentiate the filter from other
EnergyFunctionFilters. The number of rows in the DataFrame is the
same as the total number of bins in the corresponding tally.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
df = pd.DataFrame()
# There is no clean way of sticking all the energy, y data into a
# DataFrame so instead we'll just make a column with the filter name
# and fill it with a hash of the __repr__. We want a hash that is
# reproducible after restarting the interpreter so we'll use hashlib.md5
# rather than the intrinsic hash().
hash_fun = hashlib.md5()
hash_fun.update(repr(self).encode('utf-8'))
out = hash_fun.hexdigest()
# The full 16 bytes make for a really wide column. Just 7 bytes (14
# hex characters) of the digest are probably sufficient.
out = out[:14]
filter_bins = _repeat_and_tile(out, stride, data_size)
df = pd.concat([df, pd.DataFrame(
{self.short_name.lower(): filter_bins})])
return df
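# --- Illustrative sketch ------------------------------------------------------
# The EnergyFunctionFilter docstring above describes a piecewise linear-linear
# interpolation of (energy, y) that evaluates to zero outside the tabulated
# energy range.  The function below is a hypothetical, NumPy-only illustration
# of that behaviour; it is not the code path OpenMC itself uses at tally time.
def _demo_energy_function(energy, y, E):
    """Evaluate a lin-lin interpolant at energies E, zero outside the grid."""
    energy = np.asarray(energy, dtype=float)
    y = np.asarray(y, dtype=float)
    return np.interp(E, energy, y, left=0.0, right=0.0)
# e.g. with energy=[1e3, 1e6] and y=[2.0, 4.0]:
#     _demo_energy_function([1e3, 1e6], [2.0, 4.0], [5e5, 1e7]) -> [~3.0, 0.0]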
| mit |
shikhardb/scikit-learn | sklearn/tree/export.py | 30 | 4529 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Licence: BSD 3 clause
from ..externals import six
from . import _tree
def export_graphviz(decision_tree, out_file="tree.dot", feature_names=None,
max_depth=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def node_to_str(tree, node_id, criterion):
if not isinstance(criterion, six.string_types):
criterion = "impurity"
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if tree.children_left[node_id] == _tree.TREE_LEAF:
return "%s = %.4f\\nsamples = %s\\nvalue = %s" \
% (criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id],
value)
else:
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
return "%s <= %.4f\\n%s = %s\\nsamples = %s" \
% (feature,
tree.threshold[node_id],
criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id])
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id, criterion)))
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
out_file.write('%d [label="(...)", shape="box"] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
out_file.write("digraph Tree {\n")
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
out_file.write("}")
finally:
if own_file:
out_file.close()
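# --- Illustrative sketch (not part of scikit-learn) ---------------------------
# A minimal end-to-end usage example.  It assumes GraphViz's `dot` executable
# is available on the PATH; the helper name `_demo_export_and_render` and the
# subprocess call are hypothetical conveniences, not scikit-learn API.
def _demo_export_and_render(dot_path="tree.dot", png_path="tree.png"):
    import subprocess
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier

    iris = load_iris()
    clf = DecisionTreeClassifier().fit(iris.data, iris.target)
    export_graphviz(clf, out_file=dot_path, feature_names=iris.feature_names)
    # Render the DOT file to PNG, mirroring the command quoted in the
    # export_graphviz docstring: `dot -Tpng tree.dot -o tree.png`.
    subprocess.check_call(["dot", "-Tpng", dot_path, "-o", png_path])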
| bsd-3-clause |
yanlend/scikit-learn | sklearn/feature_selection/tests/test_base.py | 143 | 3670 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
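# --- Illustrative sketch (not part of the test suite) -------------------------
# SelectorMixin derives `get_support`, `transform` and `inverse_transform`
# from the boolean mask returned by `_get_support_mask`.  A hypothetical quick
# check with a non-default step, assuming 6 input features:
def _demo_step_selector():
    X_demo = np.arange(12).reshape(2, 6)
    sel = StepSelector(step=3).fit(X_demo)
    # mask keeps columns 0 and 3 -> [True, False, False, True, False, False]
    return sel.get_support(), sel.transform(X_demo)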
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
ye-zhi/project-epsilon | code/utils/scripts/noise-pca_script.py | 1 | 13894 | """
This script builds the design matrix for our linear regression.
We explore the influence of linear and quadratic drifts on the model
performance.
Script for the raw data.
Run with:
python noise-pca_script.py
from this directory
"""
from __future__ import print_function, division
import sys, os, pdb
from scipy import ndimage
from scipy.ndimage import gaussian_filter
from matplotlib import colors
from os.path import splitext
from scipy.stats import t as t_dist
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
import scipy
import pprint as pp
import json
# Specify the path for functions
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
sys.path.append(os.path.join(os.path.dirname(__file__), "./"))
from smoothing import *
from diagnostics import *
from glm import *
from plot_mosaic import *
from mask_filtered_data import *
# Locate the paths
project_path = '../../../'
data_path = project_path+'data/ds005/'
path_dict = {'data_filtered':{
'type' : 'filtered',
'feat' : '.feat/',
'bold_img_name' : 'filtered_func_data_mni.nii.gz',
'run_path' : 'model/model001/'
},
'data_original':{
'type' : '',
'feat': '',
'bold_img_name' : 'bold.nii.gz',
'run_path' : 'BOLD/'
}}
# TODO: uncomment for final version
#subject_list = [str(i) for i in range(1,17)]
#run_list = [str(i) for i in range(1,4)]
# Run only for subject 1 and 5 - run 1
run_list = [str(i) for i in range(1,2)]
subject_list = ['1', '5']
d_path = path_dict['data_original']  # 'data_original' or 'data_filtered'
images_paths = [('ds005' + '_sub' + s.zfill(3) + '_t1r' + r, \
data_path + 'sub%s/'%(s.zfill(3)) + d_path['run_path'] \
+ 'task001_run%s%s/%s' %(r.zfill(3),d_path['feat'],\
d_path['bold_img_name'])) \
for r in run_list \
for s in subject_list]
# set gray colormap and nearest neighbor interpolation by default
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
# Mask
# To be used with the normal data
thres = 375 #From analysis of the histograms
# To be used with the filtered data
mask_path = project_path+'data/mni_icbm152_t1_tal_nlin_asym_09c_mask_2mm.nii'
sm = ''
#sm='not_smooth/'
project_path = project_path + sm
# Create the needed directories if they do not exist
dirs = [project_path+'fig/',\
project_path+'fig/BOLD',\
project_path+'fig/drifts',\
project_path+'fig/pca',\
project_path+'fig/pca/projections/',\
project_path+'fig/linear_model/mosaic',\
project_path+'fig/linear_model/mosaic/middle_slice',\
project_path+'txt_output/',\
project_path+'txt_output/MRSS/',\
project_path+'txt_output/pca/',\
project_path+'txt_output/drifts/']
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
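# --- Hedged sketch (not used by the pipeline below) ---------------------------
# The loop below assembles, for every subject/run, a design matrix with an
# intercept, the four convolved task regressors, a linear drift and a demeaned
# quadratic drift.  The helper below restates that construction in one place;
# the name `build_drift_design_matrix` is illustrative only.
def build_drift_design_matrix(convolved, n_trs):
    """Return the (n_trs x 7) design matrix used for the 'with drift' model."""
    X = np.ones((n_trs, 7))
    X[:, 1:5] = convolved[:, 1:5]
    lin = np.linspace(-1, 1, n_trs)
    X[:, 5] = lin
    quad = lin ** 2
    X[:, 6] = quad - np.mean(quad)
    return X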
print("Starting noise-pca for the raw data analysis\n")
for image_path in images_paths:
name = image_path[0]
if d_path['type']=='filtered':
in_brain_img = nib.load('../../../'+
'data/ds005/sub001/model/model001/task001_run001.feat/'\
+ 'masked_filtered_func_data_mni.nii.gz')
# Image shape (91, 109, 91, 240)
#in_brain_img = make_mask_filtered_data(image_path[1],mask_path)
data_int = in_brain_img.get_data()
data = data_int.astype(float)
mean_data = np.mean(data, axis=-1)
in_brain_mask = (mean_data - 0.0) < 0.01
Transpose = False
else:
img = nib.load(image_path[1])
data_int = img.get_data()
data = data_int.astype(float)
mean_data = np.mean(data, axis=-1)
in_brain_mask = mean_data > thres
Transpose = True
# Smoothing with Gaussian filter
smooth_data = smoothing(data,1,range(data.shape[-1]))
# Selecting the voxels in the brain
in_brain_tcs = smooth_data[in_brain_mask, :]
#in_brain_tcs = data[in_brain_mask, :]
vol_shape = data.shape[:-1]
# Plotting the voxels in the brain
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
plt.colorbar()
plt.contour(plot_mosaic(in_brain_mask, transpose=Transpose),colors='blue')
plt.title('In brain voxel mean values' + '\n' + (d_path['type'] + str(name)))
plt.savefig(project_path+'fig/BOLD/%s_mean_voxels_countour.png'\
%(d_path['type'] + str(name)))
#plt.show()
plt.clf()
# Convolution with 1 to 4 conditions
convolved = np.zeros((240,5))
for i in range(1,5):
#convolved = np.loadtxt(\
# '../../../txt_output/conv_normal/%s_conv_00%s_canonical.txt'\
# %(str(name),str(i)))
convolved[:,i] = np.loadtxt(\
'../../../txt_output/conv_high_res/%s_conv_00%s_high_res.txt'\
%(str(name),str(i)))
reg_str = ['Intercept','Task', 'Gain', 'Loss', 'Distance', 'Linear Drift',\
'Quadratic drift', 'PC#1', 'PC#2', 'PC#3', 'PC#4']
# Create design matrix X - Including drifts
    P = 7  # number of regressors in X, including the intercept
n_trs = data.shape[-1]
X = np.ones((n_trs, P))
for i in range(1,5):
X[:,i] = convolved[:,i]
linear_drift = np.linspace(-1, 1, n_trs)
X[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X[:,6] = quadratic_drift
# Save the design matrix
np.savetxt(project_path+\
'txt_output/drifts/%s_design_matrix_with_drift.txt'\
%(d_path['type'] + str(name)), X)
# Linear Model - Including drifts
Y = in_brain_tcs.T
betas = npl.pinv(X).dot(Y)
# Save the betas for the linear model including drifts
np.savetxt(project_path+\
'txt_output/drifts/%s_betas_with_drift.txt'%(d_path['type'] + str(name)), betas)
betas_vols = np.zeros(vol_shape + (P,))
betas_vols[in_brain_mask] = betas.T
# Plot
# Set regions outside mask as missing with np.nan
mean_data[~in_brain_mask] = np.nan
betas_vols[~in_brain_mask] = np.nan
nice_cmap_values = np.loadtxt('actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
# Plot each slice on the 3rd dimension of the image in a mosaic
for k in range(1,P):
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
#plt.imshow(plot_mosaic(betas_vols[...,k], transpose=Transpose), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(betas_vols[...,k], transpose=Transpose), cmap=nice_cmap, alpha=1)
plt.colorbar()
plt.title('Beta (with drift) values for brain voxel related to ' \
+ str(reg_str[k]) + '\n' + d_path['type'] + str(name))
plt.savefig(project_path+'fig/linear_model/mosaic/%s_withdrift_%s'\
%(d_path['type'] + str(name), str(reg_str[k]))+'.png')
plt.close()
#plt.show()
plt.clf()
#Show the middle slice only
plt.imshow(betas_vols[:, :, 18, k], cmap='gray', alpha=0.5)
plt.colorbar()
plt.title('In brain voxel - Slice 18 Projection on %s\n%s'\
%(str(reg_str[k]), d_path['type'] + str(name)))
plt.savefig(\
project_path+'fig/linear_model/mosaic/middle_slice/%s_withdrift_middleslice_%s'\
%(d_path['type'] + str(name), str(k))+'.png')
#plt.show()
plt.clf()
plt.close()
# PCA Analysis
Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
U, S, V = npl.svd(unscaled_cov)
projections = U.T.dot(Y_demeaned)
projection_vols = np.zeros(data.shape)
projection_vols[in_brain_mask, :] = projections.T
# Plot the projection of the data on the 5 first principal component
# from SVD
for i in range(1,5):
plt.plot(U[:, i])
plt.title('U' + str(i) + ' vector from SVD \n' + str(name))
plt.imshow(projection_vols[:, :, 18, i])
plt.colorbar()
plt.title('PCA - 18th slice projection on PC#' + str(i) + ' from SVD \n ' +\
d_path['type'] + str(name))
plt.savefig(project_path+'fig/pca/projections/%s_PC#%s.png' \
%((d_path['type'] + str(name),str(i))))
#plt.show()
plt.clf()
plt.close()
# Variance Explained analysis
s = []
#S is diag -> trace = sum of the elements of S
for i in S:
s.append(i/np.sum(S))
np.savetxt(project_path+\
'txt_output/pca/%s_variance_explained' % (d_path['type'] + str(name)) +\
'.txt', np.array(s[:40]))
ind = np.arange(len(s[1:40]))
plt.bar(ind, s[1:40], width=0.5)
plt.xlabel('Principal Components indices')
plt.ylabel('Explained variance in percent')
plt.title('Variance explained graph \n' + (d_path['type'] + str(name)))
plt.savefig(project_path+\
'fig/pca/%s_variance_explained.png' %(d_path['type'] + str(name)))
#plt.show()
plt.close()
# Linear Model - including PCs from PCA analysis
PC = 3 # Number of PCs to include in the design matrix
P_pca = P + PC
X_pca = np.ones((n_trs, P_pca))
for i in range(1,5):
X_pca[:,i] = convolved[:,i]
linear_drift = np.linspace(-1, 1, n_trs)
X_pca[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X_pca[:,6] = quadratic_drift
for i in range(3):
X_pca[:,7+i] = U[:, i]
# Save the design matrix - with PCs
np.savetxt(project_path+'txt_output/pca/%s_design_matrix_pca.txt'\
%(d_path['type'] + str(name)), X_pca)
#plt.imshow(X_pca, aspect=0.25)
B_pca = npl.pinv(X_pca).dot(Y)
np.savetxt(project_path+'txt_output/pca/%s_betas_pca.txt'\
%(d_path['type'] + str(name)), B_pca)
b_pca_vols = np.zeros(vol_shape + (P_pca,))
b_pca_vols[in_brain_mask, :] = B_pca.T
# Save betas as nii files
# Plot - with PCs
# Set regions outside mask as missing with np.nan
mean_data[~in_brain_mask] = np.nan
b_pca_vols[~in_brain_mask] = np.nan
# Plot each slice on the 3rd dimension of the image in a mosaic
for k in range(1,P_pca):
fig = plt.figure(figsize = (8, 5))
#plt.imshow(plot_mosaic(b_pca_vols[...,k], transpose=Transpose), cmap='gray', alpha=0.5)
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(b_pca_vols[...,k], transpose=Transpose), cmap=nice_cmap, alpha=1)
plt.colorbar()
plt.title('Beta (with PCA) values for brain voxel related to ' \
+ str(reg_str[k]) + '\n' + d_path['type'] + str(name))
plt.savefig(project_path+'fig/linear_model/mosaic/%s_withPCA_%s'\
%(d_path['type'] + str(name), str(reg_str[k]))+'.png')
#plt.show()
plt.close()
#Show the middle slice only
plt.imshow(b_pca_vols[:, :, 18, k], cmap='gray', alpha=0.5)
plt.colorbar()
plt.title('In brain voxel model - Slice 18 \n' \
'Projection on X%s \n %s'\
%(str(reg_str[k]),d_path['type'] + str(name)))
plt.savefig(\
project_path+\
'fig/linear_model/mosaic/middle_slice/%s_withPCA_middle_slice_%s'\
%(d_path['type'] + str(name), str(k))+'.png')
#plt.show()
plt.clf()
plt.close()
# Residuals
MRSS_dict = {}
MRSS_dict['ds005' + d_path['type']] = {}
MRSS_dict['ds005' + d_path['type']]['drifts'] = {}
MRSS_dict['ds005' + d_path['type']]['pca'] = {}
for z in MRSS_dict['ds005' + d_path['type']]:
MRSS_dict['ds005' + d_path['type']][z]['MRSS'] = []
residuals = Y - X.dot(betas)
df = X.shape[0] - npl.matrix_rank(X)
MRSS = np.sum(residuals ** 2 , axis=0) / df
residuals_pca = Y - X_pca.dot(B_pca)
df_pca = X_pca.shape[0] - npl.matrix_rank(X_pca)
MRSS_pca = np.sum(residuals_pca ** 2 , axis=0) / df_pca
MRSS_dict['ds005' + d_path['type']]['drifts']['mean_MRSS'] = np.mean(MRSS)
MRSS_dict['ds005' + d_path['type']]['pca']['mean_MRSS'] = np.mean(MRSS_pca)
# Save the mean MRSS values to compare the performance
# of the design matrices
for design_matrix, beta, mrss, name in \
[(X, betas, MRSS, 'drifts'), (X_pca, B_pca, MRSS_pca, 'pca')]:
MRSS_dict['ds005' + d_path['type']][name]['p-values'] = []
MRSS_dict['ds005' + d_path['type']][name]['t-test'] = []
with open(project_path+'txt_output/MRSS/ds005%s_MRSS.json'\
%(d_path['type']), 'w') as file_out:
json.dump(MRSS_dict, file_out)
# SE = np.zeros(beta.shape)
# for i in range(design_matrix.shape[-1]):
# c = np.zeros(design_matrix.shape[-1])
# c[i]=1
# c = np.atleast_2d(c).T
# SE[i,:]= np.sqrt(\
# mrss* c.T.dot(npl.pinv(design_matrix.T.dot(design_matrix)).dot(c)))
# zeros = np.where(SE==0)
# SE[zeros] = 1
# t = beta / SE
# t[:,zeros] = 0
# # Get p value for t value using CDF of t didstribution
# ltp = t_dist.cdf(abs(t), df)
# p = 1 - ltp # upper tail
# t_brain = t[in_brain_mask]
# p_brain = p[in_brain_mask]
#
# # Save 3D data in .nii files
# for k in range(1,4):
# t_nib = nib.Nifti1Image(t_brain[..., k], affine)
        # nib.save(t_nib, project_path+'txt_output/%s/%s_t-test_%s.nii.gz'\
# %(name, d_path['type'] + str(name),str(reg_str[k])))
# p_nib = nib.Nifti1Image(p_brain[..., k], affine)
        # nib.save(p_nib, project_path+'txt_output/%s/%s_p-values_%s.nii.gz'\
# %(name, d_path['type'] + str(name),str(reg_str[k])))
# pdb.set_trace()
# pdb.set_trace()
print("======================================")
print("\n Noise and PCA analysis done")
print("Design Matrix including drift terms stored in project_epsilon/txt_output/drifts/ \n\n")
print("Design Matrix including PCs terms stored in project_epsilon/txt_output/pca/\n\n")
print("Mean MRSS models results in project_epsilon/txt_output/MRSS/ds005_MRSS.json\n\n")
| bsd-3-clause |
Gabriel-p/mcs_rot_angles | aux_modules/MCs_plane.py | 1 | 38946 |
import numpy as np
from astropy.coordinates import Distance, Angle, SkyCoord
from astropy import units as u
import numpy.linalg as la
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Define main path.
import os
import sys
r_path = os.path.realpath(__file__)[:-24]
# print r_path
sys.path.insert(0, r_path + '/modules/')
from MCs_data import MCs_data
def rho_phi(coord, glx_ctr):
# Angular distance between point and center of galaxy.
rho = coord.separation(glx_ctr)
# Position angle between center and coordinates. This is the angle between
# the positive y axis (North) counter-clockwise towards the negative x
# axis (East).
Phi = glx_ctr.position_angle(coord)
# This is the angle measured counter-clockwise from the x positive axis
# (West).
phi = Phi + Angle('90d')
return rho, phi
def dist_filter(r_min, r_max, ra_g, dec_g, dm_g, e_dm_g, gal_cent):
"""
Filter clusters based on their projected angular distances 'rho'.
Values are used as: (r_min, r_max]
"""
# Obtain angular projected distance and position angle for the
# clusters in the galaxy.
coords = SkyCoord(list(zip(*[ra_g, dec_g])), unit=(u.deg, u.deg))
rho_g, phi_g = rho_phi(coords, gal_cent)
# age_f, d_d_f, e_dd_f
ra_f, dec_f, dm_f, e_dm_f, rho_f, phi_f = [], [], [], [], [], []
dm_nf, rho_nf, phi_nf = [], [], []
for i, d in enumerate(ra_g):
if r_min < rho_g[i].degree <= r_max:
ra_f.append(ra_g[i])
dec_f.append(dec_g[i])
dm_f.append(dm_g[i])
e_dm_f.append(e_dm_g[i])
rho_f.append(rho_g[i].degree)
phi_f.append(phi_g[i].degree)
else:
dm_nf.append(dm_g[i])
rho_nf.append(rho_g[i].degree)
phi_nf.append(phi_g[i].degree)
rho_f = Angle(rho_f, unit=u.deg)
phi_f = Angle(phi_f, unit=u.deg)
rho_nf = Angle(rho_nf, unit=u.deg)
phi_nf = Angle(phi_nf, unit=u.deg)
return ra_f, dec_f, rho_f, phi_f, dm_f, e_dm_f, rho_nf, phi_nf, dm_nf
def xyz_coords(rho, phi, D_0, r_dist):
'''
Obtain coordinates in the (x,y,z) system of van der Marel & Cioni (2001)
'''
d_kpc = Distance((10**(0.2 * (np.asarray(r_dist) + 5.))) / 1000.,
unit=u.kpc)
x = d_kpc * np.sin(rho.radian) * np.cos(phi.radian)
y = d_kpc * np.sin(rho.radian) * np.sin(phi.radian)
z = D_0 - d_kpc * np.cos(rho.radian)
x, y, z = x.value, y.value, z.value
return np.array([x, y, z])
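# --- Illustrative sketch -------------------------------------------------------
# xyz_coords converts distance moduli to distances via
# d[kpc] = 10**(0.2 * (dm + 5)) / 1000 before projecting onto the (x, y, z)
# system of van der Marel & Cioni (2001).  A hypothetical stand-alone check of
# that conversion (the function name is illustrative only):
def _demo_dm_to_kpc(dm):
    """Distance modulus -> distance in kpc."""
    return 10 ** (0.2 * (np.asarray(dm) + 5.)) / 1000.
# e.g. _demo_dm_to_kpc(18.5) ~ 50.1 kpc, roughly the LMC distance scale used
# to anchor D_0 in this kind of analysis.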
def mvee(points, tol=0.001):
"""
Find the minimum volume ellipse.
Return A, c where the equation for the ellipse given in "center form" is
(x-c).T * A * (x-c) = 1
http://stackoverflow.com/a/14025140/1391441
"""
points = np.asmatrix(points)
N, d = points.shape
Q = np.column_stack((points, np.ones(N))).T
err = tol + 1.0
u = np.ones(N) / N
while err > tol:
# assert u.sum() == 1 # invariant
X = Q * np.diag(u) * Q.T
M = np.diag(Q.T * la.inv(X) * Q)
jdx = np.argmax(M)
step_size = (M[jdx] - d - 1.0) / ((d + 1) * (M[jdx] - 1.))
new_u = (1 - step_size) * u
new_u[jdx] += step_size
err = la.norm(new_u - u)
u = new_u
c = u * points
A = la.inv(points.T * np.diag(u) * points - c.T * c) / d
return np.asarray(A), np.squeeze(np.asarray(c))
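# --- Illustrative sketch -------------------------------------------------------
# The matrix A returned by mvee encodes the ellipsoid in center form,
# (x - c).T * A * (x - c) = 1.  Its SVD gives the orientation (rotation matrix
# V) and the semi-axes as 1/sqrt of the singular values, which is exactly how
# get_ellipse() below unpacks it.  A hypothetical helper:
def _demo_ellipsoid_axes(A):
    """Return (semi-axis radii, rotation matrix) of the ellipsoid given by A."""
    U, D, V = la.svd(np.asarray(A))
    return 1. / np.sqrt(D), V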
def ellipse(rx, ry, rz, u, v):
x = rx * np.cos(u) * np.cos(v)
y = ry * np.sin(u) * np.cos(v)
z = rz * np.sin(v)
return x, y, z
def get_ellipse(N_ran, rho, phi, D_0, dm_g, e_dm_g):
"""
See: http://stackoverflow.com/a/14025140/1391441
"""
ellip_matrix, centers = [], []
for _ in range(N_ran):
print(_)
# Random draw.
r_dist = np.random.normal(np.asarray(dm_g), np.asarray(e_dm_g))
# r_dist = np.asarray(dm_g) # DEL
x, y, z = xyz_coords(rho, phi, D_0, r_dist)
coords = np.asarray(list(zip(*[x, y, z])))
# A : (d x d) matrix of the ellipse equation in the 'center form':
# (x-c)' * A * (x-c) = 1
# 'centroid' is the center coordinates of the ellipse.
A, centroid = mvee(coords)
ellip_matrix.append(A)
centers.append(centroid)
A = np.mean(ellip_matrix, axis=0)
centroid = np.mean(centers, axis=0)
# V is the rotation matrix that gives the orientation of the ellipsoid.
# https://en.wikipedia.org/wiki/Rotation_matrix
# http://mathworld.wolfram.com/RotationMatrix.html
U, D, V = la.svd(A)
# x, y, z radii.
rx, ry, rz = 1. / np.sqrt(D)
# print 'rads', rx/2., ry/2., rz/2.
u_small, v_small = np.mgrid[0:2 * np.pi:20j, -np.pi / 2:np.pi / 2:10j]
E = np.dstack(ellipse(rx, ry, rz, u_small, v_small))
E = np.dot(E, V) + centroid
x_e, y_e, z_e = np.rollaxis(E, axis=-1)
return x_e, y_e, z_e
def inv_trans_eqs(x_p, y_p, z_p, theta, inc):
"""
Inverse set of equations. Transform inclined plane system (x',y',z')
into face on sky system (x,y,z).
"""
x = x_p * np.cos(theta.radian) -\
y_p * np.cos(inc.radian) * np.sin(theta.radian) -\
z_p * np.sin(inc.radian) * np.sin(theta.radian)
y = x_p * np.sin(theta.radian) +\
y_p * np.cos(inc.radian) * np.cos(theta.radian) +\
z_p * np.sin(inc.radian) * np.cos(theta.radian)
z = -1. * y_p * np.sin(inc.radian) + z_p * np.cos(inc.radian)
return x, y, z
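# --- Illustrative sketch -------------------------------------------------------
# inv_trans_eqs can be read as the matrix product R_z(theta) . R_x(-inc)
# applied to (x', y', z').  The check below is hypothetical (not used
# elsewhere in this module): it builds that matrix explicitly and verifies it
# reproduces the component-wise equations above for one test point.
def _demo_check_inv_trans(theta=Angle('35d'), inc=Angle('20d')):
    t, i = theta.radian, inc.radian
    R_z = np.array([[np.cos(t), -np.sin(t), 0.],
                    [np.sin(t), np.cos(t), 0.],
                    [0., 0., 1.]])
    R_x_neg = np.array([[1., 0., 0.],
                        [0., np.cos(i), np.sin(i)],
                        [0., -np.sin(i), np.cos(i)]])
    p = np.array([1.2, -0.7, 0.4])
    return np.allclose(R_z.dot(R_x_neg).dot(p),
                       inv_trans_eqs(p[0], p[1], p[2], theta, inc))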
def make_plot(D_0, inc, theta, cl_xyz, cl_xyz_nf, dm_f, x_e, y_e, z_e):
"""
Original link for plotting intersecting planes:
http://stackoverflow.com/a/14825951/1391441
"""
# Make plot.
fig = plt.figure()
ax = Axes3D(fig)
# Plot ellipse.
# ax.plot_surface(x_e, z_e, y_e, cstride=1, rstride=1, alpha=0.05)
# Plot points inside the ellipse, with no random displacement.
x_cl, y_cl, z_cl = cl_xyz
if cl_xyz.size:
SC = ax.scatter(x_cl, z_cl, y_cl, c=dm_f, s=50)
min_X, max_X = min(x_cl) - 2., max(x_cl) + 2.
min_Y, max_Y = min(y_cl) - 2., max(y_cl) + 2.
# # Plot points *outside* the ellipse, with no random displacement.
# x_cl_nf, y_cl_nf, z_cl_nf = cl_xyz_nf
# if cl_xyz_nf.size:
# SC = ax.scatter(x_cl, z_cl_nf, y_cl_nf, c=dm_f, s=50)
# x,y plane.
X, Y = np.meshgrid([min_X, max_X], [min_Y, max_Y])
Z = np.zeros((2, 2))
# Plot x,y plane.
ax.plot_surface(X, Z, Y, color='gray', alpha=.2, linewidth=0, zorder=1)
# Axis of x,y plane.
# x axis.
ax.plot([min_X, max_X], [0., 0.], [0., 0.], ls='--', c='k', zorder=4)
# Arrow head pointing in the positive x direction.
ax.quiver(max_X, 0., 0., max_X, 0., 0., length=0.3,
arrow_length_ratio=1., color='k')
# y axis.
ax.plot([0., 0.], [0., 0.], [0., max_Y], ls='--', c='k')
# Arrow head pointing in the positive y direction.
ax.quiver(0., 0., max_Y, 0., 0., max_Y, length=0.3,
arrow_length_ratio=1., color='k')
ax.plot([0., 0.], [0., 0.], [min_Y, 0.], ls='--', c='k')
#
#
# A plane is a*x+b*y+c*z+d=0, [a,b,c] is the normal.
a, b, c, d = -1. * np.sin(theta.radian) * np.sin(inc.radian),\
np.cos(theta.radian) * np.sin(inc.radian),\
np.cos(inc.radian), 0.
print('a/c,b/c,1,d/c:', a / c, b / c, 1., d / c)
# Rotated plane.
X2_t, Y2_t = np.meshgrid([min_X, max_X], [0, max_Y])
Z2_t = (-a * X2_t - b * Y2_t) / c
X2_b, Y2_b = np.meshgrid([min_X, max_X], [min_Y, 0])
Z2_b = (-a * X2_b - b * Y2_b) / c
# Top half of first x',y' inclined plane.
ax.plot_surface(X2_t, Z2_t, Y2_t, color='red', alpha=.2, lw=0, zorder=3)
# Bottom half of inclined plane.
ax.plot_surface(X2_t, Z2_b, Y2_b, color='red', alpha=.2, lw=0, zorder=-1)
# Axis of x',y' plane.
# x' axis.
x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta, inc)
x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta, inc)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b')
# Arrow head pointing in the positive x' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.3,
arrow_length_ratio=1.2)
# y' axis.
x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta, inc)
x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta, inc)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g')
# Arrow head pointing in the positive y' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.3,
arrow_length_ratio=1.2, color='g')
#
#
# a, b, c, d = pts123_abcd
# print 'a/c,b/c,1,d/c:', a / c, b / c, 1., d / c
# # Plane obtained fitting cloud of clusters.
# X3_t, Y3_t = np.meshgrid([min_X, max_X], [0, max_Y])
# Z3_t = (-a*X3_t - b*Y3_t - d) / c
# X3_b, Y3_b = np.meshgrid([min_X, max_X], [min_Y, 0])
# Z3_b = (-a*X3_b - b*Y3_b - d) / c
# # Top half of second x',y' inclined plane.
# ax.plot_surface(X3_t, Z3_t, Y3_t, color='g', alpha=.2, lw=0, zorder=3)
# # Bottom half of inclined plane.
# ax.plot_surface(X3_b, Z3_b, Y3_b, color='g', alpha=.2, lw=0, zorder=-1)
# # Axis of x',y' plane.
# # x' axis.
# x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta_pl, inc_pl)
# x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta_pl, inc_pl)
# ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b')
# # Arrow head pointing in the positive x' direction.
# ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.3,
# arrow_length_ratio=1.2)
# # y' axis.
# x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta_pl, inc_pl)
# x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta_pl, inc_pl)
# ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g')
# # Arrow head pointing in the positive y' direction.
# ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.3,
# arrow_length_ratio=1.2, color='g')
ax.set_xlabel('x (Kpc)')
ax.set_ylabel('z (Kpc)')
ax.set_ylim(max_Y, min_Y)
ax.set_zlabel('y (Kpc)')
plt.colorbar(SC, shrink=0.9, aspect=25)
ax.axis('equal')
ax.axis('tight')
# ax.view_init(elev=35., azim=-25.)
plt.show()
# plt.savefig('MCs_bulge_plane.png', dpi=150)
def plot_bulge_plane(ra_g, dec_g, dm_g, e_dm_g, D_0, gal_cent, glx_inc, theta):
"""
"""
# Projected angular distance filter.
r_min, r_max = 0., 20.
ra_f, dec_f, rho_f, phi_f, dm_f, e_dm_f, rho_nf, phi_nf, dm_nf =\
dist_filter(r_min, r_max, ra_g, dec_g, dm_g, e_dm_g, gal_cent)
cl_xyz = xyz_coords(rho_f, phi_f, D_0, dm_f)
cl_xyz_nf = xyz_coords(rho_nf, phi_nf, D_0, dm_nf)
# Number of times the error ellipse will be obtained after randomly
# shifting the distance moduli.
N_ran = 2
x_e, y_e, z_e = get_ellipse(N_ran, rho_f, phi_f, D_0, dm_f, e_dm_f)
make_plot(D_0, glx_inc, theta, cl_xyz, cl_xyz_nf, dm_f, x_e, y_e, z_e)
if __name__ == "__main__":
"""
3D plot of bulge, clusters, and fitted planes.
"""
ra = [[15.5958333333333, 12.1375, 10.9333333333333, 17.225, 15.1416666666667, 15.0041666666667, 14.3416666666667, 13.5625, 357.245833333333, 15.1041666666667, 11.3583333333333, 16.0916666666667, 14.4583333333333, 16.8333333333333, 22.6583333333333, 9.425, 18.875, 18.2583333333333, 13.3541666666667, 24.0041666666667, 11.6833333333333, 23.75, 23.3083333333333, 17.5541666666667, 25.4291666666667, 4.60416666666667, 19.5666666666667, 25.5916666666667, 11.8, 15.2833333333333, 21.2333333333333, 11.475, 29.1833333333333, 18.0166666666667, 5.66666666666667, 14.45, 22.8833333333333, 14.4458333333333, 13.275, 25.6166666666667, 11.2166666666667, 13.1458333333333, 10.7458333333333, 13.8875, 15.9708333333333, 14.425, 6.17916666666667, 20.7, 18.2125, 27.3666666666667, 5.3625, 10.35, 23.6083333333333, 5.76666666666667, 17.0791666666667, 15.1458333333333, 27.5791666666667, 11.9583333333333, 16.0166666666667, 12.3625, 17.6958333333333, 15.2333333333333, 15.4916666666667, 11.5041666666667, 14.325, 15.2041666666667, 17.0583333333333, 14.0583333333333, 16.8791666666667, 16.7375, 13.6291666666667, 12.5875, 14.3333333333333, 15.35, 17.2583333333333, 12.325, 15.1375, 10.8875, 12.1541666666667, 11.775, 16.2583333333333, 11.7291666666667, 10.9083333333333, 12.05, 11.8541666666667, 12.0041666666667, 15.7958333333333, 17.2541666666667, 15.0583333333333], [76.4166666666667, 74.225, 82.9916666666667, 78.8541666666667, 76.1416666666667, 88.0458333333333, 72.3083333333333, 76.5083333333333, 78.8125, 87.7, 76.9458333333333, 77.3, 76.1375, 77.2208333333333, 73.9208333333333, 86.4583333333333, 76.9833333333333, 83.3333333333333, 72.7958333333333, 77.7333333333333, 86.0458333333333, 72.2791666666667, 72.5875, 77.625, 86.7166666666667, 71.8583333333333, 72.6208333333333, 78.9458333333333, 76.975, 74.725, 86.7125, 73.7625, 72.25, 94.3291666666667, 76.5375, 91.8708333333333, 74.5583333333333, 93.4833333333333, 72.1541666666667, 70.8083333333333, 73.4625, 79.7583333333333, 76.55, 82.9416666666667, 74.5416666666667, 76.9416666666667, 78.1041666666667, 74.3916666666667, 76.6416666666667, 85.9833333333333, 81.55, 74.9416666666667, 69.925, 79.5208333333333, 82.6416666666667, 74.9083333333333, 82.2083333333333, 95.3916666666667, 79.4541666666667, 67.6666666666667, 77.7958333333333, 85.375, 77.3958333333333, 76.4708333333333, 69.4125, 76.5125, 74.8083333333333, 76.6041666666667, 85.8958333333333, 82.4833333333333, 79.1666666666667, 77.7875, 78.45, 76.125, 82.925, 73.1875, 82.85, 72.7, 92.2208333333333, 93.9875, 88.8958333333333, 85.8333333333333, 74.1208333333333, 84.7833333333333, 80.05, 72.4125, 73.55, 71.5166666666667, 77.3458333333333, 77.9208333333333, 77.65, 81.3625, 74.7125, 81.1166666666667, 79.2333333333333, 81.125, 68.9083333333333, 93.6166666666667, 91.6291666666667, 74.3583333333333, 73.7541666666667, 83.0125, 86.55, 93.6708333333333, 74.9708333333333, 76.8958333333333, 79.2208333333333, 79.0708333333333, 77.9166666666667, 82.4416666666667, 77.3125, 83.2541666666667, 77.5083333333333, 83.6625, 74.5625, 80.4375, 79.6708333333333, 85.4916666666667, 71.6041666666667, 73.225, 76.9041666666667, 76.4, 86.4833333333333, 85.1083333333333, 77.6666666666667, 74.5916666666667, 77.7208333333333, 73.9666666666667, 82.3333333333333, 85.4541666666667, 77.6333333333333, 79.1125, 80.0083333333333, 71.875, 88.925, 85.6208333333333, 84.4416666666667, 86.3625, 80.8, 83.0958333333333, 77.2125, 82.9625, 76.8375, 76.6708333333333, 83.5541666666667, 82.4958333333333, 85.4125, 82.4125, 76.3541666666667, 76.6416666666667]]
dec = [[-72.0030555555556, -73.3069444444444, -72.9766666666667, -73.2416666666667, -72.3655555555556, -72.3688888888889, -71.8913888888889, -72.2413888888889, -72.9452777777778, -71.2947222222222, -73.4813888888889, -72.8477777777778, -72.9436111111111, -73.3775, -76.0544444444444, -73.9083333333333, -71.1788888888889, -70.9627777777778, -72.1963888888889, -75.4577777777778, -72.0630555555556, -75.5566666666667, -74.1672222222222, -73.2091666666667, -71.1611111111111, -74.3186111111111, -72.0016666666667, -74.1733333333333, -73.4772222222222, -74.0736111111111, -71.1836111111111, -73.5066666666667, -74.2194444444445, -75.1975, -75.0747222222222, -73.4216666666667, -71.9527777777778, -74.3266666666667, -73.3802777777778, -71.2791666666667, -73.0019444444445, -72.1930555555556, -72.5886111111111, -74.0636111111111, -72.8261111111111, -74.4727777777778, -73.755, -75.0016666666667, -73.1194444444444, -73.7283333333333, -73.7486111111111, -72.8908333333333, -72.8744444444444, -73.6697222222222, -72.8841666666667, -71.4608333333333, -74.3561111111111, -73.4783333333333, -74.6191666666667, -73.3980555555556, -72.7919444444444, -73.1516666666667, -71.0202777777778, -73.3955555555556, -72.9327777777778, -73.3488888888889, -73.2569444444444, -74.1561111111111, -73.1197222222222, -73.235, -74.1852777777778, -73.3872222222222, -71.1702777777778, -73.2402777777778, -73.0863888888889, -73.3716666666667, -72.2583333333333, -73.4388888888889, -73.4155555555556, -73.3730555555556, -73.0427777777778, -73.4233333333333, -73.4405555555556, -73.4463888888889, -73.4580555555556, -73.4861111111111, -72.2736111111111, -73.2066666666667, -72.4583333333333], [-68.6394444444445, -68.0022222222222, -67.9716666666667, -68.6811111111111, -68.2083333333333, -71.8583333333333, -72.0566666666667, -68.0263888888889, -68.8825, -71.7077777777778, -66.7980555555556, -68.4441666666667, -67.9755555555556, -68.0836111111111, -67.7833333333333, -69.3802777777778, -67.3577777777778, -68.1522222222222, -67.5347222222222, -67.6266666666667, -69.3333333333333, -67.3416666666667, -72.8275, -68.4005555555556, -69.1897222222222, -67.6597222222222, -67.3258333333333, -69.1919444444445, -67.9288888888889, -67.8469444444445, -69.4197222222222, -67.9644444444444, -72.64, -70.0608333333333, -68.4458333333333, -72.4941666666667, -67.7683333333333, -72.5052777777778, -68.5594444444444, -73.8119444444444, -69.5719444444444, -69.0011111111111, -68.0630555555556, -68.2355555555556, -68.0602777777778, -67.8613888888889, -68.7719444444444, -65.2677777777778, -68.3630555555556, -69.1805555555556, -70.9813888888889, -69.8011111111111, -74.0172222222222, -69.1716666666667, -63.2033333333333, -69.5575, -71.6327777777778, -72.79, -68.4727777777778, -66.9569444444444, -67.6266666666667, -69.185, -67.8105555555556, -67.0494444444445, -66.1994444444444, -68.6283333333333, -67.9083333333333, -68.375, -66.2086111111111, -72.0547222222222, -70.5408333333333, -67.6825, -66.62, -68.3497222222222, -72.1461111111111, -72.5177777777778, -72.0425, -72.5766666666667, -72.3838888888889, -70.0730555555556, -62.3452777777778, -66.2622222222222, -67.6227777777778, -74.8533333333333, -68.9041666666667, -72.2480555555556, -69.8069444444444, -66.9113888888889, -67.7783333333333, -67.5655555555556, -70.4875, -73.5702777777778, -69.9577777777778, -67.7286111111111, -68.6827777777778, -67.6780555555556, -73.7316666666667, -72.6094444444445, -72.2263888888889, -67.6852777777778, -67.7141666666667, -64.2422222222222, -69.0825, -69.8019444444445, -67.9236111111111, 
-67.4608333333333, -69.15, -69.1541666666667, -68.7266666666667, -71.0005555555556, -67.6997222222222, -67.8491666666667, -66.6991666666667, -68.3055555555556, -68.0491666666667, -68.9172222222222, -69.0794444444444, -69.0475, -72.5683333333333, -72.1725, -68.5419444444444, -68.6286111111111, -69.2719444444444, -69.2486111111111, -68.7536111111111, -69.8030555555556, -67.4711111111111, -69.7058333333333, -70.5794444444445, -68.9208333333333, -66.94, -69.0802777777778, -69.2611111111111, -72.5883333333333, -74.3538888888889, -65.3627777777778, -74.7827777777778, -69.3452777777778, -70.7777777777778, -67.9969444444444, -67.9802777777778, -67.9911111111111, -66.8291666666667, -67.8422222222222, -67.8563888888889, -67.8788888888889, -69.2294444444444, -70.9838888888889, -68.5005555555556, -68.4272222222222]]
dist_cent = [[1633.20060682, 2320.18925372, 2441.9621715, 1471.75739309, 2376.23276698, 769.377260812, 1217.10160803, 1804.52411396, 5941.22951677, 2131.04017704, 909.015121115, 3069.45508982, 2364.25345255, 2670.26753775, 4890.13489057, 2343.49016847, 3159.23688725, 2711.83412564, 1818.55754823, 4455.35179015, 1123.62228704, 4757.29044595, 3641.00953753, 2648.4875134, 4474.93618064, 3141.57483709, 3560.24707724, 4473.53808798, 1395.16822521, 1590.48289743, 4018.53487003, 1077.40931903, 5110.49822201, 3786.85646257, 4265.8368639, 949.628191291, 3943.07061992, 1767.67615703, 1299.74797416, 4641.567255, 2418.05685486, 897.32557739, 3039.33043949, 1461.11210457, 1901.98375256, 2165.3084445, 2985.15151754, 3722.76959311, 2000.6292342, 4885.31051319, 2625.48765461, 1076.03465615, 3317.63420596, 2482.45477321, 1670.70686978, 1704.67888631, 4679.63812859, 3032.23144391, 2116.49256805, 2422.17347074, 1542.38398274, 3014.72270399, 2369.65890352, 1867.37485958, 688.161984621, 2390.12368317, 1417.8418462, 2759.56312304, 1647.94174103, 2532.76451148, 2666.69527657, 1799.77351614, 1911.04658283, 2378.98229824, 1319.33831563, 646.650576733, 1952.83158151, 2437.45811475, 1828.61711731, 1838.06758207, 1147.07949676, 2459.7173664, 1123.54113051, 1845.94527547, 2373.7888037, 2374.78095982, 1177.87078532, 1347.286255, 724.047475304], [2094.92442356, 2592.17806506, 1926.01374032, 1393.13441938, 2375.35464572, 3301.57725704, 3191.52180512, 1846.0135317, 716.76297379, 3154.98748878, 3076.59112567, 2026.05665608, 2404.83900491, 1555.09594563, 2707.74040478, 2869.88446607, 2126.74995801, 1875.64558761, 3013.12651671, 1938.91000725, 2787.32652108, 3248.71116208, 3895.8901948, 2000.81445911, 2338.12168352, 3264.69637398, 3074.99893799, 2636.47366843, 2026.34617439, 2555.05591583, 1987.45137574, 2962.78454192, 3645.08442741, 4482.58313968, 1839.78891743, 4308.71233216, 2341.82811636, 4655.60143549, 2645.31421226, 4649.97712828, 2659.06169124, 495.958510033, 1712.84745196, 2951.12877171, 2751.29010002, 1869.62116249, 1815.09401991, 4178.76760298, 1500.54520148, 1794.75124939, 2529.48578966, 1640.54629482, 5595.45850131, 776.552314833, 5577.42226175, 2249.44973927, 3280.94442372, 5192.25922245, 2306.05901115, 4799.58913132, 1930.71867668, 1618.96956185, 2738.38874315, 2453.83795634, 4563.65453258, 1390.93463993, 2210.73606753, 2156.22099955, 3489.11690674, 2339.47934036, 1913.24925842, 1765.47926278, 2664.78715134, 1747.9825195, 2559.15521594, 3985.98208869, 2470.42710796, 3449.61772162, 4247.88799427, 4176.76382235, 6981.56554962, 3438.49595369, 2544.39520164, 4846.09325026, 859.207154012, 4248.51583614, 3334.30573069, 3728.40654495, 1759.97165254, 2178.23559988, 1194.19833024, 3858.08899466, 1864.27857663, 1561.90783843, 2706.82540924, 1747.18252909, 4836.99690823, 4641.18832985, 4140.00763706, 2964.26958036, 3078.63612913, 4672.26090061, 2310.20822426, 4180.41248267, 2162.15029365, 2072.33397643, 1238.51032921, 832.357478845, 1488.41649353, 2593.59405836, 1824.3311299, 2309.75855988, 2853.29775447, 2151.58363423, 2434.44194297, 2082.03137409, 1198.78596379, 1997.28834526, 3626.80120077, 3532.99815015, 2924.8945852, 2101.1318558, 2466.86972041, 2506.56843199, 2352.67147924, 2754.38598176, 1932.30496879, 2459.48921061, 1639.6977746, 1695.42812081, 2380.69363485, 2640.55720245, 2121.15585755, 3588.5380412, 4855.28448834, 4857.60200313, 4765.37072491, 1887.71370931, 1942.00303845, 1926.04452804, 2570.73288389, 1574.74408319, 3067.96627803, 1827.77703955, 1778.39064842, 2280.49016348, 1949.46253097, 2136.73909356, 
1495.06656829, 1870.72119252]]
e_d_cent = [[1039.173740299393, 1435.9536198970777, 1778.3059807875993, 833.5600644307985, 944.8627199983013, 9.540124318824205, 823.8633204223586, 1401.4181419730958, 827.5312450161011, 1163.202855888643, 13.237829767759157, 1792.3207837264274, 1560.168479675753, 1148.1705423534424, 755.9245287150616, 892.191969353227, 884.4665374591882, 415.35433225392086, 1875.7404289875428, 442.41685887935273, 797.3852853246149, 893.6966802217544, 715.515865771003, 1468.0163280348056, 180.07593240624257, 310.93678012208176, 1539.3048577844493, 645.5935589311879, 1221.0569425039175, 622.8771608179757, 777.3761195766289, 830.7302823568057, 74.42330047098997, 821.8292306571432, 1103.510336489361, 940.0405957271148, 822.4108254121722, 608.4887378615729, 1148.9388310016825, 587.5570422769338, 1526.421120290217, 1168.2108316701813, 1273.2683945252095, 393.432086570192, 1557.4353087829875, 844.2681890359437, 770.8856764819485, 760.2515378456518, 910.6248838134877, 598.6815415818562, 38.23452198067387, 978.0345113217967, 34.163250596628764, 36.151558902120726, 1015.1877769341709, 494.3376980922027, 68.14875955525937, 1541.2904129789106, 17.571308861202773, 1792.3673345822394, 458.22021965535055, 1283.3482722452554, 825.5956710209496, 1353.1526557220127, 1291.585217521375, 1159.6867266474756, 599.8811014721533, 1580.683975865727, 857.1852247427792, 1537.8170745504758, 1243.6395933750302, 1169.2812348399343, 365.9048145832149, 1640.885818719533, 13.585851453369786, 5.368550393503098, 1899.2919526321182, 1839.912870204379, 1621.6520186557234, 1144.4604372844262, 875.8209539282942, 1765.885395992803, 762.8973515574974, 1847.1410426886998, 1644.595816646902, 1643.8858310928317, 489.95206768810164, 16.70608038529138, 10.544177978144234], [1785.2629840823188, 1041.566284758639, 1467.6674047933768, 1923.8136494672647, 1716.0321102312591, 915.244895489895, 221.1675461512587, 861.867312378096, 840.6042035290191, 844.2414907308923, 1314.2330413768143, 1770.5151998468243, 1633.4007149260217, 368.65492682278284, 1058.123240416804, 1840.4439280460017, 284.2266672489778, 1506.0332777066492, 514.4581837549737, 878.2567435346746, 1708.3601232217165, 472.68214070414155, 997.4875266424194, 1954.127788865912, 1228.248441899879, 856.8220621495436, 221.15248443469196, 2353.736947917064, 1397.013963090411, 1120.4495754771256, 299.74838064309364, 1170.9093515493155, 414.38923232920945, 912.2142994105723, 1534.6450360693136, 645.6729294664426, 264.5515241024804, 345.43316624042524, 247.75738755022581, 653.3364213144663, 1445.3983698014852, 1050.9281645823662, 344.67425076422523, 1995.0077769507548, 1475.4497099997725, 825.5266504486219, 2150.14206872408, 454.5676595110679, 368.51923389480214, 336.38429850370164, 1890.5055794165903, 351.828770708083, 994.9605893924067, 2229.0283582517486, 548.3166216178901, 1742.920966906669, 1984.4888736732746, 566.106410249732, 2434.639259730216, 622.6818281879467, 936.9663122493897, 318.60972914001735, 1931.1872592082586, 165.28422562460204, 89.07313767059404, 423.11626224110586, 243.6303481764325, 1816.3310639967779, 435.7639805821094, 270.7290811126059, 2139.7993528686457, 310.6312946105225, 629.1766510798647, 907.0254731098203, 580.6065658166992, 1309.0795526339405, 681.692149849808, 155.04438853570673, 92.31684124911177, 136.71413051358266, 630.9594773060937, 443.11461492136965, 209.3880413379435, 110.5204345895767, 2013.1230449025077, 1518.1460692179344, 1951.8103229039705, 762.6417741189457, 290.3259190547951, 1400.7507173640981, 442.85762169709034, 1086.835709620925, 914.7013000175225, 
367.2400890475026, 2182.627764031268, 1037.4172952821752, 110.51264193361975, 198.1143051051561, 698.273859249986, 1365.9485757880254, 1122.979469109, 413.50630387776516, 1243.5706321936586, 692.719607065467, 229.5825564846332, 261.3031238931711, 2347.1190868041, 2194.0422166832936, 1670.1045987321418, 1841.9016208506491, 299.8714988772865, 1624.557624100974, 1063.2981681602307, 1820.151128560012, 1081.256744265969, 2280.823993202776, 2062.179945562474, 1302.6619137307714, 206.9994601689073, 1212.1724246853282, 2013.6803279587316, 1956.0130420885787, 1593.5985662002408, 1725.1768982775568, 2138.5128286813383, 1919.6897087683137, 315.76110544137276, 1470.8243533267491, 1766.3716811308943, 332.203147610922, 224.8820964437035, 2478.4850127560644, 2377.8401976977834, 207.7094605398353, 132.62699690261672, 1238.1214902654456, 110.50946129093892, 293.3991946533112, 2113.9358140607815, 1403.4104181381058, 1943.7259970491498, 351.0171521341531, 1318.071612339641, 331.1161250625808, 286.95452010783447, 1695.0764266614954, 1387.1609196274565, 1682.152980716172, 395.94052375974496, 1337.190585009628]]
aarr = [[[9.0, 8.9, 8.75, 9.9, 8.2, 8.1, 9.1, 7.9, 9.6, 9.6, 8.9, 9.6, 9.1, 9.2, 9.2, 9.45, 9.1, 9.3, 7.4, 9.6, 9.85, 9.1, 9.7, 9.55, 9.35, 9.15, 9.4, 9.0, 8.45, 9.7, 8.95, 8.0, 7.95, 9.45, 9.45, 9.65, 9.0, 9.05, 8.8, 9.3, 9.05, 8.0, 8.9, 9.5, 7.8, 9.8, 9.15, 9.8, 9.6, 9.55, 9.6, 9.45, 9.7, 9.75, 8.95, 9.6, 8.2, 8.8, 9.2, 8.7, 8.3, 8.4, 9.1, 8.5, 8.8, 8.35, 8.75, 9.6, 8.8, 8.3, 9.4, 8.3, 8.4, 8.0, 8.0, 8.5, 7.8, 8.7, 8.0, 8.4, 8.0, 8.1, 8.0, 8.0, 8.2, 6.9, 7.0, 7.2, 6.7], [9.20411998265592, 9.14612803567824, 8.79934054945358, 9.82607480270083, 8.04139268515822, 8.09691001300806, 8.79934054945358, 7.90308998699194, 9.77815125038364, 9.73239375982297, 9.17609125905568, 9.36172783601759, 9.30102999566398, 9.39794000867204, 9.20411998265592, 9.32221929473392, 9.10037054511756, 9.44715803134222, 7.39794000867204, 9.81291335664286, 9.77815125038364, 9.04139268515822, 9.47712125471966, 9.73239375982297, 9.07918124604762, 9.06069784035361, 9.32221929473392, 9.04139268515822, 8.39794000867204, 9.96848294855393, 8.77815125038364, 8.20411998265593, 8.04139268515822, 9.57978359661681, 9.61278385671973, 9.49136169383427, 9.14612803567824, 9.30102999566398, 8.5051499783199, 9.25527250510331, 9.07918124604762, 8.14612803567824, 9.0, 9.68124123737559, 7.39794000867204, 9.73239375982297, 9.30102999566398, 9.67209785793572, 9.63346845557959, 9.77815125038364, 9.49136169383427, 9.32221929473392, 9.80617997398389, 9.51851393987789, 9.07918124604762, 9.77815125038364, 8.14612803567824, 8.69897000433602, 9.44715803134222, 8.35, 8.25, 8.4, 9.15, 8.4, 8.65, 8.1, 8.65, 9.45, 8.45, 8.1, 9.25, 8.4, 7.9, 8.1, 8.3, 8.05, 7.7, 8.35, 7.9, 8.1, 8.0, 7.9, 7.8, 8.1, 8.4, 8.34242268082221, 7.9, 8.15, 8.1]], [[8.45, 8.2, 8.2, 8.7, 8.9, 9.3, 8.9, 9.15, 8.4, 8.6, 8.8, 9.1, 8.95, 8.8, 8.3, 8.6, 9.0, 9.5, 8.8, 8.6, 8.7, 9.1, 9.05, 9.1, 9.2, 8.6, 9.0, 8.7, 8.3, 9.05, 9.1, 9.1, 9.0, 9.0, 8.2, 9.2, 8.9, 9.05, 9.05, 9.3, 9.1, 9.15, 8.65, 9.4, 9.2, 9.05, 8.8, 9.5, 7.9, 9.0, 8.4, 8.95, 9.2, 8.3, 9.4, 8.3, 8.9, 9.05, 9.5, 9.2, 9.05, 8.0, 8.2, 9.1, 9.15, 8.3, 8.9, 8.65, 9.1, 8.85, 9.1, 8.7, 8.8, 9.0, 9.3, 9.25, 8.75, 9.05, 9.3, 9.4, 9.3, 9.45, 8.95, 9.3, 8.0, 9.3, 9.1, 8.65, 9.1, 8.6, 8.95, 9.4, 9.15, 9.5, 8.75, 8.8, 9.55, 9.15, 9.2, 9.1, 9.15, 9.2, 9.45, 9.2, 9.15, 9.0, 8.8, 8.0, 8.0, 9.5, 7.9, 9.0, 8.3, 8.8, 8.4, 8.8, 8.85, 8.1, 9.55, 8.3, 9.15, 8.8, 8.2, 7.9, 8.8, 8.3, 8.9, 8.1, 8.3, 8.0, 8.4, 8.7, 8.6, 9.25, 9.2, 9.45, 9.25, 8.8, 9.1, 8.8, 9.55, 7.9, 8.6, 9.1, 7.0, 6.7, 7.7, 7.3, 8.85, 7.5], [8.0, 8.1, 8.25, 8.69897000433602, 9.14612803567824, 9.30102999566398, 9.14612803567824, 9.11394335230684, 8.20411998265593, 8.5051499783199, 8.95424250943932, 9.1, 8.75, 8.60205999132796, 8.35, 8.45, 9.04139268515822, 9.39794000867204, 8.60205999132796, 8.69897000433602, 8.7, 9.09691001300805, 9.11394335230684, 8.89762709129044, 9.30102999566398, 8.55, 8.9, 8.75, 8.25, 9.23044892137827, 9.0, 8.89762709129044, 9.04139268515822, 9.14612803567824, 8.0, 9.25527250510331, 8.69897000433602, 9.0, 8.84509804001426, 9.20411998265592, 9.0, 9.11394335230684, 8.55, 9.23044892137827, 9.14612803567824, 9.17609125905568, 8.69897000433602, 9.34242268082221, 7.9, 9.09691001300805, 8.4, 9.20411998265592, 9.39794000867204, 8.3, 9.41497334797082, 8.05, 8.95424250943932, 8.79934054945358, 9.20411998265592, 9.20411998265592, 9.04139268515822, 8.25, 8.25, 9.14612803567824, 9.17609125905568, 8.09691001300806, 8.69897000433602, 8.5051499783199, 9.17609125905568, 8.84509804001426, 9.17609125905568, 8.60205999132796, 8.79934054945358, 8.7481880270062, 9.27875360095283, 
9.20411998265592, 8.60205999132796, 8.95424250943932, 9.23044892137827, 9.14612803567824, 9.38021124171161, 9.36172783601759, 8.85125834871907, 9.25527250510331, 8.2, 9.20411998265592, 9.11394335230684, 8.6, 9.14612803567824, 8.60205999132796, 9.07918124604762, 9.25527250510331, 9.17609125905568, 9.34242268082221, 8.65321251377534, 8.69897000433602, 9.39794000867204, 9.04139268515822, 9.25527250510331, 9.20411998265592, 9.20411998265592, 9.23044892137827, 9.36172783601759, 9.23044892137827, 9.17609125905568, 9.14612803567824, 8.3, 8.3, 7.69897000433602, 9.25527250510331, 8.14612803567824, 9.0, 8.14612803567824, 8.60205999132796, 8.45, 8.55, 8.9, 8.2, 9.30102999566398, 8.39794000867204, 9.07918124604762, 8.60205999132796, 8.1, 8.25, 8.39794000867204, 8.45, 8.65321251377534, 7.95, 8.11394335230684, 8.39794000867204, 8.0, 8.20411998265593, 8.1, 9.11394335230684, 9.04139268515822, 9.47712125471966, 9.20411998265592, 8.60205999132796, 9.20411998265592, 8.79934054945358, 9.25527250510331, 8.15, 8.34242268082221, 9.0, 8.15, 8.3, 8.25, 7.9, 7.69897000433602, 8.35]]]
darr = [[[18.92, 18.88, 19.04, 18.98, 18.88, 18.96, 18.94, 18.9, 19.06, 19.0, 18.96, 19.06, 19.04, 19.04, 18.88, 18.9, 19.02, 18.98, 18.9, 19.0, 18.98, 18.88, 19.0, 18.88, 18.94, 18.98, 18.86, 19.02, 18.92, 18.94, 19.04, 18.98, 18.96, 19.04, 18.86, 18.98, 18.88, 18.98, 19.0, 19.0, 19.04, 18.98, 19.06, 18.94, 18.9, 19.0, 19.02, 19.02, 19.0, 19.02, 18.96, 18.98, 18.96, 18.96, 18.92, 18.94, 18.96, 19.06, 18.96, 19.04, 18.94, 19.06, 18.92, 18.9, 18.98, 18.88, 18.94, 19.04, 18.92, 18.88, 18.88, 18.9, 18.94, 18.88, 18.96, 18.96, 19.02, 18.88, 18.9, 18.9, 18.94, 19.04, 18.94, 18.9, 18.88, 18.88, 18.94, 18.96, 18.96], [18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9]], [[18.42, 18.54, 18.44, 18.44, 18.56, 18.54, 18.48, 18.46, 18.48, 18.54, 18.56, 18.42, 18.42, 18.48, 18.44, 18.58, 18.48, 18.44, 18.52, 18.52, 18.58, 18.52, 18.42, 18.42, 18.54, 18.44, 18.48, 18.6, 18.44, 18.54, 18.48, 18.56, 18.52, 18.42, 18.44, 18.44, 18.48, 18.52, 18.48, 18.44, 18.56, 18.48, 18.5, 18.6, 18.56, 18.46, 18.42, 18.46, 18.48, 18.48, 18.58, 18.48, 18.6, 18.52, 18.44, 18.42, 18.6, 18.44, 18.58, 18.54, 18.52, 18.5, 18.58, 18.5, 18.5, 18.48, 18.5, 18.42, 18.52, 18.48, 18.56, 18.5, 18.52, 18.46, 18.52, 18.58, 18.52, 18.5, 18.5, 18.5, 18.42, 18.52, 18.5, 18.5, 18.52, 18.6, 18.6, 18.44, 18.5, 18.54, 18.5, 18.42, 18.52, 18.48, 18.6, 18.52, 18.5, 18.48, 18.44, 18.56, 18.56, 18.46, 18.54, 18.44, 18.5, 18.5, 18.54, 18.52, 18.44, 18.58, 18.5, 18.42, 18.54, 18.42, 18.44, 18.4, 18.44, 18.44, 18.48, 18.56, 18.6, 18.42, 18.42, 18.4, 18.58, 18.58, 18.48, 18.42, 18.54, 18.48, 18.5, 18.6, 18.58, 18.48, 18.5, 18.6, 18.5, 18.48, 18.42, 18.44, 18.4, 18.5, 18.56, 18.48, 18.5, 18.56, 18.44, 18.42, 18.48, 18.54], [18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, -9999999999.9, -9999999999.9, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, -9999999999.9, -9999999999.9, 18.5, -9999999999.9, -9999999999.9, -9999999999.9, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5]]]
dsigma = [[[0.05, 0.05, 0.06, 0.07, 0.03, 0.05, 0.06, 0.05, 0.05, 0.07, 0.06, 0.06, 0.05, 0.04, 0.06, 0.04, 0.05, 0.06, 0.07, 0.05, 0.05, 0.07, 0.07, 0.06, 0.06, 0.05, 0.07, 0.05, 0.05, 0.06, 0.04, 0.05, 0.06, 0.04, 0.06, 0.05, 0.05, 0.06, 0.04, 0.07, 0.05, 0.06, 0.04, 0.03, 0.06, 0.05, 0.04, 0.05, 0.05, 0.05, 0.06, 0.06, 0.04, 0.06, 0.05, 0.05, 0.06, 0.05, 0.03, 0.06, 0.04, 0.04, 0.06, 0.05, 0.05, 0.04, 0.05, 0.06, 0.04, 0.06, 0.05, 0.04, 0.04, 0.06, 0.04, 0.03, 0.07, 0.07, 0.06, 0.04, 0.06, 0.06, 0.05, 0.07, 0.06, 0.06, 0.03, 0.05, 0.06], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]], [[0.05, 0.05, 0.06, 0.05, 0.06, 0.07, 0.06, 0.04, 0.07, 0.05, 0.06, 0.04, 0.06, 0.05, 0.06, 0.06, 0.05, 0.06, 0.05, 0.06, 0.04, 0.05, 0.05, 0.06, 0.06, 0.05, 0.04, 0.05, 0.06, 0.06, 0.05, 0.03, 0.05, 0.06, 0.06, 0.03, 0.05, 0.06, 0.06, 0.06, 0.05, 0.04, 0.07, 0.04, 0.06, 0.03, 0.06, 0.06, 0.04, 0.06, 0.04, 0.05, 0.04, 0.06, 0.04, 0.06, 0.06, 0.03, 0.07, 0.07, 0.07, 0.05, 0.06, 0.03, 0.04, 0.06, 0.06, 0.06, 0.05, 0.06, 0.06, 0.06, 0.06, 0.04, 0.04, 0.06, 0.06, 0.06, 0.04, 0.06, 0.05, 0.05, 0.06, 0.05, 0.06, 0.06, 0.06, 0.05, 0.05, 0.07, 0.05, 0.07, 0.06, 0.05, 0.04, 0.07, 0.05, 0.07, 0.05, 0.06, 0.03, 0.05, 0.06, 0.05, 0.05, 0.06, 0.06, 0.07, 0.03, 0.04, 0.06, 0.05, 0.07, 0.06, 0.04, 0.05, 0.03, 0.04, 0.04, 0.07, 0.04, 0.07, 0.06, 0.02, 0.05, 0.06, 0.06, 0.04, 0.06, 0.04, 0.06, 0.06, 0.05, 0.04, 0.06, 0.05, 0.05, 0.02, 0.07, 0.05, 0.06, 0.06, 0.06, 0.06, 0.05, 0.05, 0.05, 0.04, 0.06, 0.03], [0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, -9999999999.9, -9999999999.9, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, -9999999999.9, -9999999999.9, 0.1, -9999999999.9, -9999999999.9, -9999999999.9, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]]
j = 1
inc_pa_fit_angles = [[Angle('40.d'), Angle('108.d')],
[Angle('18.336d'), Angle('153.06d')]]
glx_inc, glx_PA = inc_pa_fit_angles[j]
# Equatorial coordinates for clusters in this galaxy.
ra_g, dec_g = ra[j], dec[j]
# ASteCA distance moduli and their errors.
dm_g, e_dm_g = darr[j][0], dsigma[j][0]
# Retrieve center coordinates and distance (in parsecs) to galaxy.
gal_cent, gal_dist, e_dm_dist = MCs_data(j)
gal_dist = gal_dist.kpc * u.kpc
theta = glx_PA + Angle('90d')
plot_bulge_plane(ra_g, dec_g, dm_g, e_dm_g, gal_dist, gal_cent,
glx_inc, theta)
| gpl-3.0 |
MicrosoftGenomics/FaST-LMM | fastlmm/association/LeaveOneChromosomeOut.py | 2 | 2547 | #!/usr/bin/env python2.7
#
# Written (W) 2014 Christian Widmer
# Copyright (C) 2014 Microsoft Research
"""
Created on 2014-03-11
@author: Christian Widmer
@summary: Module for performing GWAS
"""
import logging
import numpy as np
import scipy as sp
import pandas as pd
from scipy import stats
import pylab
import time
import fastlmm.inference as fastlmm
import fastlmm.util.util as util
from fastlmm.pyplink.snpreader.Bed import Bed
from fastlmm.util.pickle_io import load, save
from fastlmm.util.util import argintersect_left
class LeaveOneChromosomeOut(object):
"""LeaveOneChromosomeOut cross validation iterator (based on sklearn).
    Provides train/test indices to split data into train/test sets. The
    dataset is split into one fold per chromosome, according to which
    chromosome each sample belongs to. Each fold is then used as the
    validation set once while the k - 1 remaining folds form the training set.
Parameters
----------
    chr_names : list
        List of chromosome identifiers
indices : boolean, optional (default True)
Return train/test split as arrays of indices, rather than a boolean
mask array. Integer indices are required when dealing with sparse
matrices, since those cannot be indexed by boolean masks.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
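    Examples
    --------
    A minimal usage sketch (hypothetical chromosome labels):

    >>> chr_names = ["1", "1", "2", "2", "3", "3"]
    >>> locv = LeaveOneChromosomeOut(chr_names)
    >>> len(locv)
    3
    >>> for train_index, test_index in locv:
    ...     pass  # e.g. fit on train_index, evaluate on test_index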
"""
def __init__(self, chr_names, indices=True, random_state=None):
#random_state = check_random_state(random_state)
self.chr_names = np.array(chr_names)
self.unique_chr_names = list(set(chr_names))
self.unique_chr_names.sort()
assert len(self.unique_chr_names) > 1
self.n = len(self.chr_names)
self.n_folds = len(self.unique_chr_names)
self.indices = indices
self.idxs = np.arange(self.n)
def __iter__(self):
if self.indices:
ind = np.arange(self.n)
for chr_name in self.unique_chr_names:
test_index = self.chr_names == chr_name
train_index = np.logical_not(test_index)
if self.indices:
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
| apache-2.0 |
apdavison/IzhikevichModel | PyNN/old/test_Izhikevich.py | 2 | 6043 |
from pyNN.random import RandomDistribution, NumpyRNG
#from pyNN.neuron import *
from pyNN.nest import *
from pyNN.utility import get_script_args, Timer, ProgressBar, init_logging, normalized_filename
import matplotlib.pyplot as plt
import numpy as np
globalTimeStep = 0.01
timeStep = globalTimeStep
setup(timestep=timeStep, min_delay=0.5)
a = 0.02
b = -0.1
c = -55.0
d = 6.0
I = 0
v_init = -70
u_init = b * v_init
neuronParameters = {
'a': a,
'b': b,
'c': c,
'd': d,
'i_offset': I
}
initialValues = {'u': u_init, 'v': v_init}
cell_type = Izhikevich(**neuronParameters)
neuron = create(cell_type)
neuron.initialize(**initialValues)
neuron.record('v')
totalTimes = np.zeros(0)
totalAmps = np.zeros(0)
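# Ramp-current protocol for the Class 1 excitable regime (G): no input for the
# first 30 ms, then a current rising linearly (slope 0.075 per ms) until 300 ms.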
times = np.linspace(0.0, 30.0, int(1 + (30.0 - 0.0) / timeStep))
amps = np.linspace(0.0, 0.0, int(1 + (30.0 - 0.0) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)
times = np.linspace(30 + timeStep, 300, int((300 - 30) / timeStep))
amps = np.linspace(0.075 * timeStep, 0.075 * (300 - 30), int((300 - 30) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)
injectedCurrent = StepCurrentSource(times=totalTimes, amplitudes=totalAmps)
injectedCurrent.inject_into(neuron)
#neuron.set(i_offset = 30)
run(300)
data = neuron.get_data().segments[0]
plt.ion()
fig = plt.figure(1, facecolor='white')
ax1 = fig.add_subplot(5, 4, 7)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
ax1.spines['left'].set_color('None')
ax1.spines['right'].set_color('None')
ax1.spines['bottom'].set_color('None')
ax1.spines['top'].set_color('None')
ax1.set_title('(G) Class 1 excitable')
vm = data.filter(name='v')[0]
plt.plot(vm.times, vm, [0, 30, 300, 300],[-90, -90, -70, -90])
plt.show(block=False)
fig.canvas.draw()
############################################
## Sub-plot H: Class 2 excitable
############################################
timeStep = globalTimeStep
setup(timestep=timeStep, min_delay=0.5)
a = 0.2
b = 0.26
c = -65.0
d = 0.0
I = -0.5
v_init = -64.0
u_init = b * v_init
neuronParameters = {
'a': a,
'b': b,
'c': c,
'd': d,
'i_offset': I
}
initialValues = {'u': u_init, 'v': v_init}
cell_type = Izhikevich(**neuronParameters)
neuron = create(cell_type)
neuron.initialize(**initialValues)
neuron.record('v')
totalTimes = np.zeros(0)
totalAmps = np.zeros(0)
times = np.linspace(0.0, 30.0, int(1 + (30.0 - 0.0) / timeStep))
amps = np.linspace(-0.5, -0.5, int(1 + (30.0 - 0.0) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)
times = np.linspace(30 + timeStep, 300, int((300 - 30) / timeStep))
amps = np.linspace(-0.5 + 0.015 * timeStep, -0.5 + 0.015 * (300 - 30), int((300 - 30) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)
injectedCurrent = StepCurrentSource(times=totalTimes, amplitudes=totalAmps)
injectedCurrent.inject_into(neuron)
run(300)
data = neuron.get_data().segments[0]
plt.ion()
fig = plt.figure(1, facecolor='white')
ax1 = fig.add_subplot(5, 4, 8)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
ax1.spines['left'].set_color('None')
ax1.spines['right'].set_color('None')
ax1.spines['bottom'].set_color('None')
ax1.spines['top'].set_color('None')
ax1.set_title('(H) Class 2 excitable')
vm = data.filter(name='v')[0]
plt.plot(vm.times, vm, [0, 30, 300, 300],[-90, -90,-70, -90]);
plt.show(block=False)
fig.canvas.draw()
#####################################################
## Sub-plot R: Accommodation
#####################################################
timeStep = globalTimeStep
setup(timestep=timeStep, min_delay=0.5)
a = 0.02
b = 1.0
c = -55.0
d = 4.0
I = 0.0
v_init = -65.0
u_init = -16.0
neuronParameters = {
'a': a,
'b': b,
'c': c,
'd': d,
'i_offset': I
}
initialValues = {'u': u_init, 'v': v_init}
cell_type = Izhikevich(**neuronParameters)
neuron = create(cell_type)
neuron.initialize(**initialValues)
neuron.record('v')
totalTimes = np.zeros(0)
totalAmps = np.zeros(0)
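# Accommodation protocol (R): a slow ramp from 0 to 8 over the first 200 ms, no
# input until 300 ms, a brief ramp from 0 to 4 over 12.5 ms, then no input until 400 ms.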
times = np.linspace(0.0, 200.0, int(1 + (200.0 - 0.0) / timeStep))
amps = np.linspace(0.0, 8.0, int(1 + (200.0 - 0.0) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)
times = np.linspace(200 + timeStep, 300, int((300 - 200) / timeStep))
amps = np.linspace(0.0, 0.0, int((300 - 200) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)
times = np.linspace(300 + timeStep, 312.5, int((312.5 - 300) / timeStep))
amps = np.linspace(0.0, 4.0, int((312.5 - 300) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)
times = np.linspace(312.5 + timeStep, 400, int((400 - 312.5) / timeStep))
amps = np.linspace(0.0, 0.0, int((400 - 312.5) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)
injectedCurrent = StepCurrentSource(times=totalTimes, amplitudes=totalAmps)
injectedCurrent.inject_into(neuron)
run(400.0)
data = neuron.get_data().segments[0]
plt.ion()
fig = plt.figure(1, facecolor='white')
ax1 = fig.add_subplot(5, 4, 18)
#plt.xlabel("Time (ms)")
#plt.ylabel("Vm (mV)")
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
ax1.spines['left'].set_color('None')
ax1.spines['right'].set_color('None')
ax1.spines['bottom'].set_color('None')
ax1.spines['top'].set_color('None')
ax1.set_title('(R) Accommodation')
vm = data.filter(name='v')[0]
plt.plot(vm.times, vm, totalTimes,1.5 * totalAmps - 90);
plt.show(block=False)
fig.canvas.draw()
raw_input("Simulation finished... Press enter to exit...")
| bsd-3-clause |
tf-czu/EFD | image.py | 1 | 4172 | #!/usr/bin/python
"""
Simple tools for images
usage:
python image.py <input img> <color> <treshold value>
"""
import sys
import numpy as np
import cv2
from matplotlib import pyplot as plt
from contours import *
ker1 = 10
CUTING = False
Xi = 100
Yi = 200
Xe = 3900
Ye = 2600
def cutImage(img, xi, yi, xe, ye, imShow = False ):
img = img[yi:ye, xi:xe]
if imShow == True:
showImg( img )
return img
def writeLabelsInImg( img, referencePoints1, outFileName, referencePoints2 = None, resize = None ):
num1 = 0
#offset = np.array([100,0]) #for tae2016 only
color1 = (0,0,255)
font = cv2.FONT_HERSHEY_SIMPLEX
for point in referencePoints1:
point = tuple(point)
#point = tuple(point - offset) #for tae2016 only
cv2.putText(img, str(num1),point, font, 3,color1,2 )
num1 += 1
if referencePoints2:
offset = np.array([150, 0])
num2 = 0
color2 = (255, 0, 0)
for point2 in referencePoints2:
#print point2
point2 = point2 + offset
#print"point", point2
point2 = tuple(point2)
cv2.putText(img, str(num2),point2, font, 2,color2,2 )
num2 += 1
if resize:
img = cv2.resize(img, None, fx = resize, fy = resize, interpolation = cv2.INTER_CUBIC)
cv2.imwrite( outFileName, img )
return img
def writeImg( img, fileName ):
cv2.imwrite( fileName, img )
def showImg( img ):
cv2.imshow( 'image', img )
cv2.waitKey(0)
cv2.destroyAllWindows()
def getHist( grayImg ):
# showImg( grayImg )
plt.hist( grayImg.ravel(), 256,[0,256])
plt.show()
def getGrayImg( img, color = "g" ):
gray = None
b,g,r = cv2.split( img )
if color == "g":
gray = g
elif color == "b":
gray = b
elif color == "r":
gray = r
else:
print "color is not defined!"
return gray
def getThreshold( gray, thrValue ):
ret, binaryImg = cv2.threshold( gray, thrValue, 255,cv2.THRESH_BINARY_INV)
return binaryImg
def openingClosing( binaryImg, ker1 = 10, ker2 = 5 ):
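    # Morphological opening (ker1 x ker1 kernel) removes small bright specks from
    # the binary mask; the optional closing (ker2 x ker2) then fills small holes
    # inside the remaining objects.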
newBinaryImg = None
kernel = np.ones( ( ker1, ker1 ), np.uint8 )
newBinaryImg = cv2.morphologyEx( binaryImg, cv2.MORPH_OPEN, kernel)
if ker2 != None:
kernel = np.ones( ( ker2, ker2 ), np.uint8 )
newBinaryImg = cv2.morphologyEx( newBinaryImg, cv2.MORPH_CLOSE, kernel)
return newBinaryImg
def imageMain( imageFile, tresh, color, cuting = True ):
img = cv2.imread( imageFile, 1 )
if cuting == True:
img = cutImage(img, Xi, Yi, Xe, Ye, imShow = False )
cv2.imwrite( "cutedImg.png", img )
gray = getGrayImg( img, color )
grayImgName = imageFile.split(".")[0]+"_gray.png"
cv2.imwrite( grayImgName, gray )
getHist( gray )
if tresh:
binaryImg = getThreshold( gray, tresh )
binaryImg2 = openingClosing( binaryImg, ker1, ker2 = None )
newImgName = imageFile.split(".")[0]+"_test.png"
newImgName2 = imageFile.split(".")[0]+"_test2.png"
#newImgName = "newImg.png"
cv2.imwrite( newImgName, binaryImg )
cv2.imwrite( newImgName2, binaryImg2 )
contoursList, hierarchy= cv2.findContours( binaryImg2.copy(), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
contoursList, referencePoints = sortContours2(contoursList)
print "Number of cnt:", len(contoursList)
outFileNameLab = imageFile.split(".")[0]+"_lab.png"
#print outFileNameLab
img = writeLabelsInImg( img, referencePoints, outFileNameLab )
#cntNum = 100
#cv2.drawContours(img, contoursList, cntNum, (0,255,0), 3)
#cv2.imwrite( "imgLabCNT.png", img )
if __name__ == "__main__":
if len(sys.argv) < 2:
print __doc__
sys.exit(1)
color = "g"
tresh = None
if len(sys.argv) > 2:
color = sys.argv[2]
if len(sys.argv) > 3:
tresh = float(sys.argv[3])
cuting = CUTING
imageFile = sys.argv[1]
imageMain( imageFile, tresh, color, cuting )
| gpl-2.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/cluster/plot_cluster_comparison.py | 1 | 4683 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| mit |
martinpilat/dag-evaluate | custom_models.py | 1 | 6526 | __author__ = 'Martin'
import pandas as pd
import numpy as np
from scipy import stats
from sklearn import cross_validation
from sklearn import ensemble
def is_transformer(cls):
return hasattr(cls, '__dageva_type') and cls.__dageva_type == 'transformer'
def is_predictor(cls):
return hasattr(cls, '__dageva_type') and cls.__dageva_type == 'predictor'
def make_transformer(cls):
"""
Adds Transformer to the bases of the cls class, useful in order to distinguish between transformers and predictors.
:param cls: The class to turn into a Transformer
:return: A class equivalent to cls, but with Transformer among its bases
"""
cls.__dageva_type = 'transformer'
return cls
def make_predictor(cls):
"""
Adds Predictor to the bases of the cls class, useful in order to distinguish between transformers and predictors.
:param cls: The class to turn into a Predictor
:return: A class equivalent to cls, but with Predictor among its bases
"""
cls.__dageva_type = 'predictor'
return cls
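# A minimal usage sketch (hypothetical example): the two helpers above simply tag
# a class with a '__dageva_type' attribute, so any estimator class can be marked
# and later dispatched on via is_transformer()/is_predictor(), e.g.:
#
#     from sklearn import decomposition, tree
#     PCATransformer = make_transformer(decomposition.PCA)
#     TreePredictor = make_predictor(tree.DecisionTreeClassifier)
#     assert is_transformer(PCATransformer) and is_predictor(TreePredictor)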
class KMeansSplitter:
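    """Splits a data set into k chunks by k-means clustering the samples; after
    fitting, transform() returns one chunk per cluster, ordered by increasing
    cluster size observed during fit (summary inferred from the code below).
    """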
def __init__(self, k):
from sklearn import cluster
self.kmeans = cluster.KMeans(n_clusters=k)
self.sorted_outputs = None
self.weight_idx = []
def fit(self, x, y, sample_weight=None):
self.kmeans.fit(x, y)
preds = self.kmeans.predict(x)
out = []
for i in range(self.kmeans.n_clusters):
idx = [n for n in range(len(preds)) if preds[n] == i]
self.weight_idx.append(idx)
if isinstance(x, pd.DataFrame):
out.append(x.iloc[idx])
else:
out.append(x[idx])
mins = [len(x.index) for x in out]
self.sorted_outputs = list(np.argsort(mins))
self.weight_idx = [self.weight_idx[i] for i in self.sorted_outputs]
return self
def transform(self, x):
preds = self.kmeans.predict(x)
out = []
for i in range(self.kmeans.n_clusters):
idx = [n for n in range(len(preds)) if preds[n] == i]
if isinstance(x, pd.DataFrame):
out.append(x.iloc[idx])
else:
out.append(x[idx])
return [out[i] for i in self.sorted_outputs]
class ConstantModel:
def __init__(self, cls):
self.cls = cls
def fit(self, x, y):
return self
def predict(self, x):
return pd.Series(np.array([self.cls]*len(x)), index=x.index)
class Aggregator:
def aggregate(self, x, y):
pass
class Voter(Aggregator):
def fit(self, x, y):
return self
def union_aggregate(self, x, y):
f_list, t_list = x, y
f_frame, t_frame = pd.DataFrame(), pd.Series()
for i in range(len(t_list)):
fl = f_list[i]
assert isinstance(fl, pd.DataFrame)
if fl.columns.dtype == np.dtype('int64'):
cols = map(lambda z: str(id(fl)) + '_' + str(z), fl.columns)
fl.columns = cols
t_frame = t_frame.append(t_list[i])
f_frame = f_frame.append(f_list[i])
f_frame.sort_index(inplace=True)
t_frame = t_frame.sort_index()
return f_frame, t_frame
def aggregate(self, x, y):
if not all([x[0].index.equals(xi.index) for xi in x]):
return self.union_aggregate(x, y)
res = pd.DataFrame(index=y[0].index)
for i in range(len(y)):
res["p"+str(i)] = y[i]
modes = res.apply(lambda row: stats.mode(row, axis=None)[0][0], axis=1)
if modes.empty:
return x[0], pd.Series()
return x[0], pd.Series(modes, index=y[0].index)
class Workflow:
def __init__(self, dag=None):
self.dag = dag
self.sample_weight = None
self.classes_ = None
def fit(self, X, y, sample_weight=None):
import eval #TODO: Refactor to remove circular imports
self.models = eval.train_dag(self.dag, train_data=(X, y), sample_weight=sample_weight)
self.classes_ = np.unique(y)
return self
def predict(self, X):
import eval #TODO: Refactor to remove circular imports
return np.array(eval.test_dag(self.dag, self.models, test_data=(X, None)))
def transform(self, X):
import eval
return eval.test_dag(self.dag, self.models, test_data=(X, None), output='feats_only')
def get_params(self, deep=False):
return {'dag': self.dag}
def set_params(self, **params):
if 'sample_weight' in params:
self.sample_weight = params['sample_weight']
class Stacker(Aggregator):
def __init__(self, sub_dags=None, initial_dag=None):
self.sub_dags = sub_dags
self.initial_dag = initial_dag
def fit(self, X, y, sample_weight=None):
import eval
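        # Out-of-fold stacking: for each stratified CV fold, fit the initial
        # (preprocessing) dag and every sub-dag on the training part, then collect
        # the sub-dags' predictions on the held-out part; the concatenated
        # out-of-fold predictions become the meta-level training set self.train.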
preds = [[] for _ in self.sub_dags]
for train_idx, test_idx in cross_validation.StratifiedKFold(y, n_folds=5):
tr_X, tr_y = X.iloc[train_idx], y.iloc[train_idx]
tst_X, tst_y = X.iloc[test_idx], y.iloc[test_idx]
wf_init = Workflow(self.initial_dag)
wf_init.fit(tr_X, tr_y, sample_weight=sample_weight)
preproc_X, preproc_y = eval.test_dag(self.initial_dag, wf_init.models, test_data=(tr_X, tr_y), output='all')
pp_tst_X = wf_init.transform(tst_X)
if pp_tst_X.empty:
continue
for i, dag in enumerate(self.sub_dags):
wf = Workflow(dag)
wf.fit(preproc_X, preproc_y)
res = wf.predict(pp_tst_X)
preds[i].append(pd.DataFrame(res, index=pp_tst_X.index))
preds = [pd.concat(ps) for ps in preds]
self.train = pd.concat(preds, axis=1)
self.train.columns = ['p' + str(x) for x in range(len(preds))]
return self
def aggregate(self, X, y):
res = pd.DataFrame(index=y[0].index)
for i in range(len(X)):
res["p" + str(i)] = y[i]
return res, y[0]
class Booster(ensemble.AdaBoostClassifier):
def __init__(self, sub_dags=()):
self.sub_dags = sub_dags
self.current_sub_dag = 0
super(Booster, self).__init__(base_estimator=Workflow(), n_estimators=len(sub_dags), algorithm='SAMME')
def _make_estimator(self, append=True, random_state=0):
estimator = Workflow(self.sub_dags[self.current_sub_dag])
self.current_sub_dag += 1
if append:
self.estimators_.append(estimator)
return estimator | mit |
ChanChiChoi/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
rkube/blob_tracking | analysis/correlate_test.py | 1 | 7170 | #!/opt/local/bin/python
#-*- Encoding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from misc.correlate import correlate
from sys import exit
"""
Correlate timeseries of two pixels. Trigger on large amplitudes.
Try estimating radial blob velocity from this.
"""
shotnr = 1120711010
frame0 = 20000
nframes = 25000
wlen = 10 # Window length for correlation analysis
gpi_fps = 390.8e3 # Phantom camera frame rate
tau = 1. / gpi_fps
dx = 0.061 / 64.
tau_max = 10
num_blobs = 100
blob_vel = np.zeros(num_blobs)
frames_file = np.load('%d/%d_frames.npz' % ( shotnr, shotnr) )
frames = frames_file['frames_normalized_mean']
ts_pixel1 = frames[ :, 48, 53 ]
ts_pixel2 = frames[ :, 48, 55 ]
ts_pixel3 = frames[ :, 48, 57 ]
ts_pixel4 = frames[ :, 48, 59 ]
ts_pixel5 = frames[ :, 48, 61 ]
#np.savez('corr_ts.npz', ts_pixel1 = ts_pixel1, ts_pixel2 = ts_pixel2, ts_pixel3 = ts_pixel3, ts_pixel4 = ts_pixel4, ts_pixel5 = ts_pixel5 )
#df = np.load('test/corr_ts.npz')
#ts_pixel1 = df['ts_pixel1']
#ts_pixel2 = df['ts_pixel2']
#ts_pixel3 = df['ts_pixel3']
#ts_pixel4 = df['ts_pixel4']
#ts_pixel5 = df['ts_pixel5']
plt.figure()
plt.plot( np.arange( frame0 ), ts_pixel1[:frame0], 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel1[frame0:frame0 + nframes], 'r' )
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel1[frame0 + nframes:], 'k' )
plt.plot( np.arange( frame0 ), ts_pixel2[:frame0] + 3.0, 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel2[frame0:frame0 + nframes] + 3.0, 'r')
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel2[frame0 + nframes:] + 3.0, 'k' )
plt.plot( np.arange( frame0 ), ts_pixel3[:frame0] + 6.0, 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel3[frame0:frame0 + nframes] + 6.0, 'r' )
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel3[frame0 + nframes:] + 6.0, 'k' )
plt.plot( np.arange( frame0 ), ts_pixel4[:frame0] + 9.0, 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel4[frame0:frame0 + nframes] + 9.0, 'r' )
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel4[frame0 + nframes:] + 9.0, 'k' )
plt.plot( np.arange( frame0 ), ts_pixel5[:frame0] + 12.0, 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel5[frame0:frame0 + nframes] + 12.0, 'r' )
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel5[frame0 + nframes:] + 12.0, 'k' )
#plt.show()
ts_pixel1 = ts_pixel1[frame0 : frame0 + nframes]
ts_pixel2 = ts_pixel2[frame0 : frame0 + nframes]
ts_pixel3 = ts_pixel3[frame0 : frame0 + nframes]
ts_pixel4 = ts_pixel4[frame0 : frame0 + nframes]
ts_pixel5 = ts_pixel5[frame0 : frame0 + nframes]
# Take the 100 largest blobs and estimate their velocity
ts1_sortidx = ts_pixel1.argsort()[-num_blobs:]
plt.figure()
plt.plot(ts_pixel1[ts1_sortidx])
for idx, max_idx in enumerate(ts1_sortidx):
if ( max_idx == -1 ):
print 'Index was blanked out previously, skipping to next index'
continue
elif ( max_idx < wlen ):
        print 'Too close to boundaries for full correlation, skipping to next index'
continue
# Blank out all other peaks occuring within +- 10 frames
print 'before:', max_idx, ts1_sortidx
close_peak_indices = np.squeeze(np.argwhere( np.abs(ts1_sortidx - max_idx) < 10 ))
print 'close_peak_indices:', close_peak_indices, ' entries:', ts1_sortidx[ close_peak_indices ]
ts1_sortidx[ close_peak_indices ] = -1
print 'after:', max_idx, ts1_sortidx
print max_idx
fig = plt.figure()
plt.subplot(211)
plt.title('max_idx = %d' % max_idx)
plt.xlabel('frame no.')
plt.ylabel('I tilde')
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel1[ max_idx - wlen : max_idx + wlen ] )
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel2[ max_idx - wlen : max_idx + wlen ] )
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel3[ max_idx - wlen : max_idx + wlen ] )
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel4[ max_idx - wlen : max_idx + wlen ] )
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel5[ max_idx - wlen : max_idx + wlen ] )
plt.subplot(212)
plt.xlabel('Time lag tau')
plt.ylabel('Correlation amplitude')
tau_range = np.arange( -tau_max, tau_max )
# Compute the correlation between the timeseries of neighbouring pixels. The maximum of
# the correlation amplitude is used to compute the radial blob velocity.
# Limit the neighbourhood in which the peak correlation amplitude may be to +- 10 frames.
c11 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel1[ max_idx - wlen - 1 : max_idx + wlen + 1], 2*wlen )
c11 = c11[ 2*wlen - tau_max : 2*wlen + tau_max]
plt.plot(tau_range, c11)
plt.plot(tau_range[c11.argmax()], c11.max(), 'ko')
c12 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel2[ max_idx - wlen - 1 : max_idx + wlen + 1], 2*wlen )
c12 = c12[ 2*wlen - tau_max : 2*wlen + tau_max]
max_c12 = c12.argmax()
plt.plot(tau_range, c12)
plt.plot(tau_range[c12.argmax()], c12.max(), 'ko')
c13 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel3[ max_idx - wlen - 1: max_idx + wlen +1 ], 2*wlen )
c13 = c13[ 2*wlen - tau_max : 2*wlen + tau_max]
max_c13 = c13.argmax()
plt.plot(tau_range, c13)
plt.plot(tau_range[c13.argmax()], c13.max(), 'ko')
    c14 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel4[ max_idx - wlen - 1: max_idx + wlen + 1], 2*wlen )
c14 = c14[ 2*wlen - tau_max : 2*wlen + tau_max]
max_c14 = c14.argmax()
plt.plot(tau_range, c14)
plt.plot(tau_range[c14.argmax()], c14.max(), 'ko')
c15 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel5[ max_idx - wlen - 1: max_idx + wlen + 1], 2*wlen )
c15 = c15[ 2*wlen - tau_max : 2*wlen + tau_max]
max_c15 = c15.argmax()
plt.plot(tau_range, c15)
plt.plot(tau_range[c15.argmax()], c15.max(), 'ko')
fig.savefig('%d/vrad_correlation/%d_frame%05d.png' % ( shotnr, shotnr, max_idx ) )
plt.close()
# Estimate radial blob velocity by propagation of correlation amplitude
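    # The pixel pairs above are separated by 2, 4, 6 and 8 pixels (2*dx ... 8*dx);
    # dividing each separation by the correlation peak-lag time (lag in frames
    # converted to seconds via the frame rate) gives the radial velocity in m/s.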
v_c12 = gpi_fps * 2.0*dx / (2*wlen - max_c12)
v_c13 = gpi_fps * 4.0*dx / (2*wlen - max_c13)
v_c14 = gpi_fps * 6.0*dx / (2*wlen - max_c14)
v_c15 = gpi_fps * 8.0*dx / (2*wlen - max_c15)
print 'Blob velocities from correlation method:'
print 'px1 - px2: %f, px1 - px3: %f, px1 - px4: %f, px1 - px5: %f' % (v_c12, v_c13, v_c14, v_c15 )
print 'mean: %f' % np.mean( np.array([v_c12, v_c13, v_c14, v_c15]) )
blob_vel[idx] = np.mean( np.array([v_c12, v_c13, v_c14, v_c15]) )
blob_vel = blob_vel[blob_vel != 0]
plt.figure()
plt.plot(blob_vel, '.')
plt.xlabel('Blob event no.')
plt.ylabel('Radial velocity m/s')
print '================================================================================='
print 'mean over all blobs: %f' % blob_vel.mean()
plt.show()
| mit |
WilsonWangTHU/fast-rcnn | lib/fast_rcnn/test.py | 43 | 11975 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from utils.cython_nms import nms
import cPickle
import heapq
from utils.blob import im_list_to_blob
import os
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def _bbox_pred(boxes, box_deltas):
"""Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + cfg.EPS
heights = boxes[:, 3] - boxes[:, 1] + cfg.EPS
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def im_detect(net, im, boxes):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, unused_im_scale_factors = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['rois'].reshape(*(blobs['rois'].shape))
blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),
rois=blobs['rois'].astype(np.float32, copy=False))
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = _bbox_pred(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes
def vis_detections(im, class_name, dets, thresh=0.3):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(net, imdb):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
    # heuristic: keep an average of 40 detections per class per image prior
    # to NMS
    max_per_set = 40 * num_images
    # heuristic: keep at most 100 detections per class per image prior to NMS
    max_per_image = 100
    # detection threshold for each class (this is adaptively set based on the
# max_per_set constraint)
thresh = -np.inf * np.ones(imdb.num_classes)
# top_scores will hold one minheap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(imdb.num_classes)]
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, net)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
roidb = imdb.roidb
for i in xrange(num_images):
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(net, im, roidb[i]['boxes'])
_t['im_detect'].toc()
_t['misc'].tic()
for j in xrange(1, imdb.num_classes):
inds = np.where((scores[:, j] > thresh[j]) &
(roidb[i]['gt_classes'] == 0))[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
top_inds = np.argsort(-cls_scores)[:max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
# push new scores onto the minheap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
# if we've collected more than the max number of detection,
# then pop items off the minheap and update the class threshold
if len(top_scores[j]) > max_per_set:
while len(top_scores[j]) > max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
all_boxes[j][i] = \
np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
if 0:
keep = nms(all_boxes[j][i], 0.3)
vis_detections(im, imdb.classes[j], all_boxes[j][i][keep, :])
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time)
for j in xrange(1, imdb.num_classes):
for i in xrange(num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Applying NMS to all detections'
nms_dets = apply_nms(all_boxes, cfg.TEST.NMS)
print 'Evaluating detections'
imdb.evaluate_detections(nms_dets, output_dir)
| mit |
rrohan/scikit-learn | sklearn/pipeline.py | 61 | 21271 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
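        # Route "step__param" fit parameters to their steps, fit_transform every
        # step except the final estimator, and return the transformed data plus
        # the fit parameters destined for that final estimator.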
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
warn("From version 0.19, a 1d X will not be reshaped in"
" pipeline.inverse_transform any more.", FutureWarning)
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
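
# A minimal sketch (illustration only, not part of scikit-learn): pipeline step
# parameters can be tuned with a grid search by addressing them as
# <step name>__<parameter name>, e.g. 'svc__C'. The dataset and grid values
# below are arbitrary.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.feature_selection import SelectKBest, f_classif
    from sklearn.svm import SVC
    try:
        from sklearn.model_selection import GridSearchCV
    except ImportError:  # older scikit-learn releases
        from sklearn.grid_search import GridSearchCV

    X_demo, y_demo = make_classification(n_informative=5, n_redundant=0,
                                         random_state=42)
    demo_pipe = Pipeline([('anova', SelectKBest(f_classif, k=5)),
                          ('svc', SVC(kernel='linear'))])
    demo_search = GridSearchCV(demo_pipe,
                               {'anova__k': [5, 10], 'svc__C': [0.1, 1.0, 10.0]})
    demo_search.fit(X_demo, y_demo)
    print(demo_search.best_params_)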
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply the output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply the output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
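
# A minimal usage sketch (illustration only, not part of scikit-learn): combine
# two PCA components with one univariately selected feature on the iris data;
# the optional transformer_weights entry scales the output of the named
# transformer before concatenation.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.decomposition import PCA
    from sklearn.feature_selection import SelectKBest

    iris_demo = load_iris()
    demo_union = FeatureUnion([('pca', PCA(n_components=2)),
                               ('kbest', SelectKBest(k=1))],
                              transformer_weights={'kbest': 0.5})
    X_union = demo_union.fit_transform(iris_demo.data, iris_demo.target)
    print(X_union.shape)  # 2 PCA columns + 1 selected column -> (150, 3)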
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
garethsion/UCL_RSD_Assessment_1 | greengraph/map.py | 1 | 1604 | #!/usr/bin/env python
import numpy as np
from io import BytesIO
from matplotlib import image as img
import requests
class Map(object):
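    """Fetch a static (by default satellite) image centred on (lat, long) from
    the Google Static Maps web service and estimate how green it is by counting
    pixels whose green channel dominates the red and blue channels."""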
def __init__(self, lat, long, satellite=True, zoom=10,
size=(400,400), sensor=False):
base="http://maps.googleapis.com/maps/api/staticmap?"
params=dict(
sensor= str(sensor).lower(),
zoom= zoom,
size= "x".join(map(str, size)),
center= ",".join(map(str, (lat, long) )),
style="feature:all|element:labels|visibility:off"
)
if satellite:
params["maptype"]="satellite"
self.image = requests.get(base,
params=params).content # Fetch our PNG image data
content = BytesIO(self.image)
self.pixels= img.imread(content) # Parse our PNG image as a numpy array
def green(self, threshold):
# Use NumPy to build an element-by-element logical array
greener_than_red = self.pixels[:,:,1] > threshold* self.pixels[:,:,0]
greener_than_blue = self.pixels[:,:,1] > threshold*self.pixels[:,:,2]
green = np.logical_and(greener_than_red, greener_than_blue)
return green
def count_green(self, threshold = 1.1):
return np.sum(self.green(threshold))
def show_green(self, threshold = 1.1):
green = self.green(threshold)
out = green[:,:,np.newaxis]*np.array([0,1,0])[np.newaxis,np.newaxis,:]
buffer = BytesIO()
        img.imsave(buffer, out, format='png')  # write the PNG into the in-memory buffer
return buffer.getvalue()
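
# Minimal usage sketch (not part of the original module): this performs a live
# request to the Google Static Maps service, so it needs network access and,
# depending on current Google policies, may require an API key. The coordinates
# below (central London) are purely illustrative.
if __name__ == "__main__":
    london = Map(51.50, -0.12)
    print("Green pixels: %d" % london.count_green(threshold=1.1))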
| mit |
anna-effeindzourou/trunk | doc/sphinx/ipython_directive.py | 8 | 18579 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. For example, the following code in your Sphinx
config file will configure this directive for the following input/output
prompts ``Yade [1]:`` and ``-> [1]:``::
import ipython_directive as id
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout=' -> [%d]:'
id.rc_override=dict(
prompt_in1="Yade [\#]:",
prompt_in2=" .\D..",
prompt_out=" -> [\#]:"
)
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt = re.compile("(Yade \[[0-9]+\]: )|( \.\.\.+:)")
ich.IPythonConsoleLexer.output_prompt = re.compile("(( -> )|(Out)\[[0-9]+\]: )|( \.\.\.+:)")
ich.IPythonConsoleLexer.continue_prompt=re.compile(" \.\.\.+:")
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
- Make sure %bookmarks used internally are removed on exit.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import imp
import os
import re
import shutil
import sys
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
matplotlib.use('Agg')
# Our own
import IPython
from IPython.Shell import MatplotlibShell
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
for x in sphinx_version[:2]])
COMMENT, INPUT, OUTPUT = range(3)
rc_override = {}
rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
rgxcont = re.compile(' \.+:\s?(.*)\s*')
rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')
fmtin = 'In [%d]:'
fmtout = 'Out[%d]:'
fmtcont = ' .\D.:'
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part):
"""
part is a string of ipython text, comprised of at most one
input, one ouput, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
#continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
#Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
matchcont = rgxcont.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif matchcont: #nextline.startswith(continuation):
inputline += '\n' + matchcont.group(1) #nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
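
# For example (illustration only), the two-line session
#   In [1]: 1+1
#   Out[1]: 2
# parses to [(INPUT, (None, '1+1', '')), (OUTPUT, '2')]: no decorator, no
# echoed stdout, and a single output string.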
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
IPython.Shell.Term.cout = self.cout
IPython.Shell.Term.cerr = self.cout
argv = ['-autocall', '0']
self.user_ns = {}
self.user_glocal_ns = {}
self.IP = IPython.ipmaker.make_IPython(
argv, self.user_ns, self.user_glocal_ns, embedded=True,
#shell_class=IPython.Shell.InteractiveShell,
shell_class=MatplotlibShell,
rc_override = dict(colors = 'NoColor', **rc_override))
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# we need bookmark the current dir first so we can save
# relative to it
self.process_input_line('bookmark ipy_basedir')
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
sys.stdout = self.cout
#self.IP.resetbuffer()
self.IP.push(self.IP.prefilter(line, 0))
#self.IP.runlines(line)
sys.stdout = stdout
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
#print 'INPUT:', data
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
#continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
#Nc = len(continuation)
if is_savefig:
saveargs = decorator.split(' ')
filename = saveargs[1]
outfile = os.path.join('_static/%s'%filename)
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = outfile
image_directive = '\n'.join(imagerows)
# TODO: can we get "rest" from ipython
#self.process_input_line('\n'.join(input_lines))
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
else:
# only submit the line in non-verbatim mode
self.process_input_line(line)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line)
formatted_line = fmtcont.replace('\D','.'*len(str(lineno)))+line #'%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress:
if len(rest.strip()):
if is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
self.cout.truncate(0)
return ret, input_lines, output, is_doctest, image_file
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
ind = found.find(output_prompt)
if ind<0:
raise RuntimeError('output prompt="%s" does not match out line=%s'%(output_prompt, found))
found = found[len(output_prompt):].strip()
if found!=submitted:
raise RuntimeError('doctest failure for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted))
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data block for COMMENT token."""
if not self.is_suppress:
return [data]
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
m = rgxin.match(str(self.IP.outputcache.prompt1).strip())
lineno = int(m.group(1))
input_prompt = fmtin%lineno
output_prompt = fmtout%lineno
image_file = None
image_directive = None
# XXX - This needs a second refactor. There's too much state being
# held globally, which makes for a very awkward interface and large,
# hard to test functions. I've already broken this up at least into
# three separate processors to isolate the logic better, but this only
# serves to highlight the coupling. Next we need to clean it up...
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
out_data, input_lines, output, is_doctest, image_file= \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
if image_file is not None:
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir')
self.process_input_line('cd -b ipy_basedir')
self.process_input_line(command)
self.process_input_line('cd -b ipy_thisdir')
self.cout.seek(0)
self.cout.truncate(0)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt')
# A global instance used below. XXX: not sure why this can't be created inside
# ipython_directive itself.
shell = EmbeddedSphinxShell()
def reconfig_shell():
"""Called after setting module-level variables to re-instantiate
with the set values (since shell is instantiated first at import-time
when module variables have default values)"""
global shell
shell = EmbeddedSphinxShell()
def ipython_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
):
debug = ipython_directive.DEBUG
shell.is_suppress = options.has_key('suppress')
shell.is_doctest = options.has_key('doctest')
shell.is_verbatim = options.has_key('verbatim')
#print 'ipy', shell.is_suppress, options
parts = '\n'.join(content).split('\n\n')
lines = ['.. sourcecode:: ipython', '']
figures = []
for part in parts:
block = block_parser(part)
if len(block):
rows, figure = shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else:
#print 'INSERTING %d lines'%len(lines)
state_machine.insert_input(
lines, state_machine.input_lines.source(0))
return []
ipython_directive.DEBUG = False
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
options = {'suppress': directives.flag,
'doctest': directives.flag,
'verbatim': directives.flag,
}
app.add_directive('ipython', ipython_directive, True, (0, 2, 0), **options)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: np.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
ipython_directive.DEBUG = True
#options = dict(suppress=True)
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
test()
| gpl-2.0 |
qbilius/streams | streams/utils.py | 1 | 13227 | import functools
import numpy as np
import scipy.stats
import pandas
import matplotlib.pyplot as plt
import seaborn as sns
def splithalf(data, aggfunc=np.nanmean, rng=None):
data = np.array(data)
if rng is None:
rng = np.random.RandomState(None)
inds = list(range(data.shape[0]))
rng.shuffle(inds)
half = len(inds) // 2
split1 = aggfunc(data[inds[:half]], axis=0)
split2 = aggfunc(data[inds[half:2*half]], axis=0)
return split1, split2
def pearsonr_matrix(data1, data2, axis=1):
rs = []
for i in range(data1.shape[axis]):
d1 = np.take(data1, i, axis=axis)
d2 = np.take(data2, i, axis=axis)
r, p = scipy.stats.pearsonr(d1, d2)
rs.append(r)
return np.array(rs)
def spearman_brown_correct(pearsonr, n=2):
pearsonr = np.array(pearsonr)
return n * pearsonr / (1 + (n-1) * pearsonr)
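
# A minimal sketch (illustration only) of the intended split-half reliability
# workflow: split trials in half, correlate the two half-means per item, then
# Spearman-Brown correct the resulting correlation. The synthetic `demo_data`
# array (trials x items) is only a stand-in for real measurements.
if __name__ == '__main__':
    demo_rng = np.random.RandomState(0)
    demo_data = demo_rng.randn(100, 20) + np.linspace(0, 1, 20)
    half1, half2 = splithalf(demo_data, rng=demo_rng)
    r, _ = scipy.stats.pearsonr(half1, half2)
    print('split-half r: %.3f, corrected: %.3f' % (r, spearman_brown_correct(r)))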
def resample(data, rng=None):
data = np.array(data)
if rng is None:
rng = np.random.RandomState(None)
inds = rng.choice(range(data.shape[0]), size=data.shape[0], replace=True)
return data[inds]
def bootstrap_resample(data, func=np.mean, niter=100, ci=95, rng=None):
df = [func(resample(data, rng=rng)) for i in range(niter)]
if ci is not None:
return np.percentile(df, 50-ci/2.), np.percentile(df, 50+ci/2.)
else:
return df
def _timeplot_bootstrap(x, estimator=np.mean, ci=95, n_boot=100):
ci = bootstrap_resample(x, func=estimator, ci=ci, niter=n_boot)
return pandas.Series({'emin': ci[0], 'emax': ci[1]})
def timeplot(data=None, x=None, y=None, hue=None,
estimator=np.mean, ci=95, n_boot=100,
col=None, row=None, sharex=None, sharey=None,
legend_loc='lower right', **fig_kwargs):
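    """Plot the time course of ``y`` against ``x``, one line per level of
    ``hue``, with bootstrapped confidence bands controlled by ``ci`` and
    ``n_boot``, optionally faceted into a grid of subplots by ``row`` and
    ``col``. Returns the array of matplotlib axes.
    """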
if hue is None:
hues = ['']
else:
hues = data[hue].unique()
if data[hue].dtype.name == 'category': hues = hues.sort_values()
# plt.figure()
if row is None:
row_orig = None
tmp = 'row_{}'
i = 0
row = tmp.format(i)
while row in data:
i += 1
row = tmp.format(i)
data[row] = 'row'
else:
row_orig = row
if col is None:
col_orig = None
tmp = 'col_{}'
i = 0
col = tmp.format(i)
while col in data:
i += 1
col = tmp.format(i)
data[col] = 'col'
else:
col_orig = col
if row is not None:
rows = data[row].unique()
if data[row].dtype.name == 'category': rows = rows.sort_values()
else:
rows = [(None, None)]
if col is not None:
cols = data[col].unique()
if data[col].dtype.name == 'category': cols = cols.sort_values()
else:
cols = [(None, None)]
fig, axes = plt.subplots(nrows=len(rows), ncols=len(cols), **fig_kwargs)
if hasattr(axes, 'shape'):
axes = axes.reshape([len(rows), len(cols)])
else:
axes = np.array([[axes]])
xlim = data.groupby([row, col])[x].apply(lambda x: {'amin': x.min(), 'amax': x.max()}).unstack()
ylim = data.groupby([row, col])[y].apply(lambda x: {'amin': x.min(), 'amax': x.max()}).unstack()
if sharex == 'row':
for r in rows:
xlim.loc[r, 'amin'] = xlim.loc[r, 'amin'].min()
xlim.loc[r, 'amax'] = xlim.loc[r, 'amax'].max()
elif sharex == 'col':
for c in cols:
xlim.loc[(slice(None), c), 'amin'] = xlim.loc[(slice(None), c), 'amin'].min()
xlim.loc[(slice(None), c), 'amax'] = xlim.loc[(slice(None), c), 'amax'].max()
elif sharex == 'both':
xlim.loc[:, 'amin'] = xlim.loc[:, 'amin'].min()
        xlim.loc[:, 'amax'] = xlim.loc[:, 'amax'].max()
elif isinstance(sharex, (tuple, list)):
xlim.loc[:, 'amin'] = sharex[0]
xlim.loc[:, 'amax'] = sharex[1]
if sharey == 'row':
for r in rows:
ylim.loc[r, 'amin'] = ylim.loc[r, 'amin'].min()
ylim.loc[r, 'amax'] = ylim.loc[r, 'amax'].max()
elif sharey == 'col':
for c in cols:
ylim.loc[(slice(None), c), 'amin'] = ylim.loc[(slice(None), c), 'amin'].min()
ylim.loc[(slice(None), c), 'amax'] = ylim.loc[(slice(None), c), 'amax'].max()
elif sharey == 'both':
ylim.loc[:, 'amin'] = ylim.loc[:, 'amin'].min()
        ylim.loc[:, 'amax'] = ylim.loc[:, 'amax'].max()
elif isinstance(sharey, (tuple, list)):
ylim.loc[:, 'amin'] = sharey[0]
ylim.loc[:, 'amax'] = sharey[1]
for rno, r in enumerate(rows):
for cno, c in enumerate(cols):
ax = axes[rno,cno]
for h, color in zip(hues, sns.color_palette(n_colors=len(hues))):
if hue is None:
d = data
else:
d = data[data[hue] == h]
sel_col = d[col] == c if col is not None else True
sel_row = d[row] == r if row is not None else True
if not (col is None and row is None):
d = d[sel_row & sel_col]
# if c == 'hvm_test': import ipdb; ipdb.set_trace()
if len(d) > 0:
mn = d.groupby(x)[y].apply(estimator)
def bootstrap(x):
try:
y = _timeplot_bootstrap(x[x.notnull()], estimator, ci, n_boot)
except:
y = _timeplot_bootstrap(x, estimator, ci, n_boot)
return y
if n_boot > 0:
ebars = d.groupby(x)[y].apply(bootstrap).unstack()
ax.fill_between(mn.index, ebars.emin, ebars.emax, alpha=.5, color=color)
ax.plot(mn.index, mn, linewidth=2, color=color, label=h)
else:
ax.set_visible(False)
try:
ax.set_xlim([xlim.loc[(r, c), 'amin'], xlim.loc[(r, c), 'amax']])
except:
pass
try:
ax.set_ylim([ylim.loc[(r, c), 'amin'], ylim.loc[(r, c), 'amax']])
except:
pass
if ax.is_last_row():
ax.set_xlabel(x)
if ax.is_first_col():
ax.set_ylabel(y)
if row_orig is None:
if col_orig is None:
ax.set_title('')
else:
ax.set_title('{} = {}'.format(col_orig, c))
else:
if col_orig is None:
ax.set_title('{} = {}'.format(row_orig, r))
else:
ax.set_title('{} = {} | {} = {}'.format(row_orig, r, col_orig, c))
if hue is not None:
plt.legend(loc=legend_loc, framealpha=.25)
plt.tight_layout()
return axes
def clean_data(df, std_thres=3, stim_dur_thres=1000./120):
"""
Remove outliers from behavioral data
What is removed:
- If response time is more than `std_thres` standard deviations above
the mean response time to all stimuli (default: 3)
- If the recorded stimulus duration differs by more than `std_thres`
from the requested stimulus duration (default: half a frame for 60 Hz)
:Args:
df - pandas.DataFrame
:Kwargs:
- std_thres (float, default: 3)
- stim_dur_thres (float, default: 1000./120)
:Returns:
pandas.DataFrame that has the outliers removed (not nanned)
"""
    fast_rts = np.abs(df.rt - df.rt.mean()) < std_thres * df.rt.std()
good_present_time = np.abs(df.actual_stim_dur - df.stim_dur) < stim_dur_thres # half a frame
print('Response too slow: {} out of {}'.format(len(df) - fast_rts.sum(), len(df)))
print('Stimulus presentation too slow: {} out of {}'.format(len(df) - good_present_time.sum(), len(df)))
df = df[fast_rts & good_present_time]
return df
def lazy_property(function):
"""
From: https://danijar.com/structuring-your-tensorflow-models/
"""
attribute = '_cache_' + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
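
# A small usage sketch (illustration only, not used elsewhere in this module):
# the decorated method runs once on first access and its result is cached on
# the instance afterwards.
class _LazyPropertyExample(object):
    @lazy_property
    def expensive_value(self):
        print('computing...')  # printed only on the first access
        return 42
# _LazyPropertyExample().expensive_value evaluates the method and returns 42;
# a second access on the same instance returns the cached 42 without printing.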
# def hitrate_to_dprime(df, cap=5):
# # df = pandas.DataFrame(hitrate, index=labels, columns=order)
# out = np.zeros_like(df)
# for (i,j), hit_rate in np.ndenumerate(df.values):
# target = df.index[i]
# distr = df.columns[j]
# if target == distr:
# dprime = np.nan
# else:
# miss_rate = df.loc[df.index == target, distr].mean()
# hit = hit_rate / (hit_rate + miss_rate)
# fa_rate = df.loc[df.index == distr, target].mean()
# rej_rate = df.loc[df.index == distr, distr].mean()
# fa = fa_rate / (fa_rate + rej_rate)
# dprime = scipy.stats.norm.ppf(hit) - scipy.stats.norm.ppf(fa)
# if dprime > cap: dprime = cap
# out[i,j] = dprime
# return out
def hitrate_to_dprime_o1(df, cap=20):
# df = pandas.DataFrame(hitrate, index=labels, columns=order)
targets = df.index.unique()
# distrs = df.columns.unique()
# out = pandas.DataFrame(np.zeros([len(targets), len(distrs)]), index=targets, columns=distrs)
out = pandas.Series(np.zeros(len(targets)), index=targets)
for target in targets:
# if target == 'lo_poly_animal_RHINO_2': import ipdb; ipdb.set_trace()
hit_rate = np.nanmean(df.loc[df.index == target])
# miss_rate = 1 - np.nanmean(df.loc[df.index == target])
fa_rate = np.nanmean(1 - df.loc[df.index != target, target])
dprime = scipy.stats.norm.ppf(hit_rate) - scipy.stats.norm.ppf(fa_rate)
dprime = np.clip(dprime, -cap, cap)
out[target] = dprime
return out
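    # Worked example of the computation above (illustration only): if the hit
    # rate for a target object is 0.9 and the other objects are reported as
    # that target 20% of the time (false-alarm rate 0.2), then
    # d' = Z(0.9) - Z(0.2) = 1.28 - (-0.84) ~= 2.12, clipped to +/- cap.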
# for distr in distrs:
# # for (i,j), hit_rate in np.ndenumerate(df.values):
# if target == distr:
# dprime = np.nan
# else:
# hit_rate = df.loc[df.index == target].mean()
# miss_rate = df.loc[df.index == target, distr].mean()
# hit = hit_rate / (hit_rate + miss_rate)
# fa_rate = df.loc[df.index == distr, target].mean()
# rej_rate = df.loc[df.index == distr, distr].mean()
# fa = fa_rate / (fa_rate + rej_rate)
# dprime = scipy.stats.norm.ppf(hit) - scipy.stats.norm.ppf(fa)
# if dprime > cap: dprime = cap
# out[target, distr] = dprime
# return out
def hitrate_to_dprime_i1n(df, cap=20, normalize=True):
out = pandas.Series(np.zeros(len(df)),
index=df.set_index(['obj', 'id']).index)
for (target, idd), row in df.iterrows():
hit_rate = row.acc
# miss_rate = 1 - np.nanmean(df.loc[df.index == target])
rej = df.loc[df.obj != target, target]
fa_rate = 1 - np.nanmean(rej)
dprime = scipy.stats.norm.ppf(hit_rate) - scipy.stats.norm.ppf(fa_rate)
dprime = np.clip(dprime, -cap, cap)
out.loc[(target, idd)] = dprime
if normalize:
        out -= out.groupby(level='obj').transform(lambda x: x.mean())  # subtract each object's mean d'
return out
def hitrate_to_dprime_i2n(df, cap=20):
# df = pandas.DataFrame(hitrate, index=labels, columns=order)
# targets = df.index.unique()
# distrs = df.columns.unique()
# out = pandas.DataFrame(np.zeros([len(targets), len(distrs)]), index=targets, columns=distrs)
# df = df.set_index(['obj', 'id', 'distr'])
# out = pandas.DataFrame(np.zeros(len(df), len(df.distr.unique()), index=df.index, columns=df.columns)
out = df.set_index(['obj', 'id', 'distr']).copy()
for (target, idd, distr), hit_rate in out.iterrows():
if target == distr:
out.loc[(target, idd, distr)] = np.nan
else:
# if target == 'lo_poly_animal_RHINO_2': import ipdb; ipdb.set_trace()
# hit_rate = acc
# miss_rate = 1 - np.nanmean(df.loc[df.index == target])
rej = df.loc[(df.obj == distr) & (df.distr == target), 'acc']
# import ipdb; ipdb.set_trace()
fa_rate = 1 - np.nanmean(rej)
dprime = scipy.stats.norm.ppf(hit_rate) - scipy.stats.norm.ppf(fa_rate)
# if target == 'lo_poly_animal_RHINO_2' and distr == 'MB30758' and idd == 'e387f6375d1d01a92f02394ea0c2c89de4ec4f61':
# import ipdb; ipdb.set_trace()
# hit_rate_norm = np.nanmean(df.loc[(df.obj == target) & (df.distr == distr), 'acc'])
# dprime_norm = scipy.stats.norm.ppf(hit_rate_norm) - scipy.stats.norm.ppf(fa_rate)
# dprime -= dprime_norm
out.loc[(target, idd, distr)] = dprime
# def ff(x):
# import ipdb; ipdb.set_trace()
# return x.mean()
out = out.reset_index()
out.acc -= out.groupby(['obj', 'distr']).acc.transform(lambda x: x.mean())
out.acc = np.clip(out.acc, -cap, cap)
# for (target, idd, distr), dprime in out.iterrows():
# out.loc[(target, idd, distr)] = dprime
# dprime = np.clip(dprime, -cap, cap)
return out | gpl-3.0 |
SB-BISS/RLACOSarsaLambda | Mountain_Car_Grid_Search.py | 1 | 5705 | '''
This is a grid search to find the best parameters of an algorithm.
In future developments it will be parallelized.
For the Mountain Car problem, Sarsa with tile coding uses the
parameters from the Sutton and Barto book:
alpha = 0.5 (then divided by the number of tilings; see the agent implementations)
decaying factor = 0.96
lambda = 0.96
Discretization = 8,8
'''
import gym
from gym import envs
from agents import TabularSarsaAgent
from agents import ApproximatedSarsaLambdaAgent
from agents import HAApproximatedSarsaLambdaAgent
from static_heuristics.MountainCarHeuristic import MountainCarHeuristic
from agents import StaticHeuristicApproximatedSarsaLambdaAgent
import numpy as np
import matplotlib.pyplot as plt
from gym_maze.envs.maze_env import *
import time
from model.mc_model import mc_model
import pickle
print(envs.registry.all())
env = gym.make("MountainCar-v0")
env._max_episode_steps = 1000
repetitions = 25
episodes = 20
env.reset()
obs_mins = env.observation_space.low
obs_maxs = env.observation_space.high #[env.observation_space[0].max_value, env.observation_space[1].max_value]
print obs_mins
print obs_maxs
discretizations = [10,10] #position and velocity.
num_tilings = 10
total_result = []
rend = False # render or not.
#values for Rho
rho_pos = [0.1,0.3,0.6,0.9,0.99] # [0.1,0.5,0.99] #3
#values for psi, for the heuristic
psi_pos = [0.001,0.01,0.1,0.3,0.5] # [0.001,0.01,0.1,0.3,0.5] # 5
#values of nu, for the heuristic
nu_pos = [1,5,10]
#values for discount factor
discount_pos = [1] # not discounted
lmbd = [0.9]  # Lambda is fixed, following the Sutton and Barto book, but only for replacing traces.
alpha_pos = [0.5]  # divided by the number of tilings (see the agent implementations)
eps_pos = [0.025] #decaying exploration
# one iteration of the grid search
algorithms = ["NOH","SH","H"]
Strategies = ["Replacing","TrueOnline"]
algo = algorithms[1]
strat = Strategies[1]
hard_soft = "soft"
model_based = True
z= 0 #counter
for eps in eps_pos:
for rho in rho_pos:
for psi in psi_pos:
for dis in discount_pos:
for nu in nu_pos:
for alpha in alpha_pos:
config = { "Strategy" : strat,
"Pheromone_strategy": hard_soft,
"decrease_exploration" : True, #Mountain Car has a decaying eploration
"learning_rate" : alpha,
"psi": psi,
"rho": rho,
"model" : mc_model(),
"static_heuristic": MountainCarHeuristic(model= mc_model(),actions_number=3),
"model_based":model_based,
"eps": eps,
"nu":nu, # Epsilon in epsilon greedy policies
"lambda":lmbd[0],
"discount": dis,
"n_iter": env._max_episode_steps}
times = np.zeros(episodes)
results = np.zeros(episodes)
print z
for j in range(repetitions): # this is to decide for the parameter
if algo=="NOH":
ag = ApproximatedSarsaLambdaAgent.ApproximatedSarsaLambdaAgent(obs_mins,obs_maxs,env.action_space,discretizations,[num_tilings], my_config=config)
elif algo =="SH":
ag = StaticHeuristicApproximatedSarsaLambdaAgent.StaticHeuristicApproximatedSarsaLambdaAgent(obs_mins,obs_maxs,env.action_space,discretizations,[num_tilings], my_config=config)
else:
ag = HAApproximatedSarsaLambdaAgent.HAApproximatedSarsaLambdaAgent(obs_mins,obs_maxs,env.action_space,discretizations,[num_tilings], my_config=config)
for i in range(episodes):
tb = time.time()
ag.learn(env,rend)
te = time.time()
tdiff= te-tb
res= ag.return_last_steps()
results[i] = results[i]+res[i]
print res[i]
times[i] = times[i] + tdiff
print i
#print (res[-1], [eps,rho,psi,dis,dis,alpha])
#in the maze grid search you are looking for the one with the smallest cumulative_sum
total_result.append({"parameters": [eps,rho,psi,dis,lmbd,alpha,nu] , "times":times/repetitions, "20thep": results[-1]/repetitions, "results":results/repetitions, "cumulative_sum": np.sum(results/repetitions)})
# env.step(env.action_space.sample()) # take a random action
z = z+1
with open("Mountain_car_"+algo+"_" + strat + "_" + hard_soft + "model"+str(model_based)+ ".pkl", 'wb') as f:
pickle.dump(total_result, f)
#Saving the result of the GRID Search
| mit |
potash/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 86 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
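
# Illustration (not part of the original example): with soft voting the class
# probabilities for a sample are the weighted average of the individual
# classifiers' probabilities, here with weights [2, 1, 2]. The manually
# averaged result should closely match the VotingClassifier output (small
# differences are possible because eclf fits its own clones, and SVC's
# probability calibration is not exactly reproducible across fits).
probas = [c.predict_proba(X[:1]) for c in (clf1, clf2, clf3)]
manual_soft_vote = np.average(np.vstack(probas), axis=0, weights=[2, 1, 2])
print("manual weighted average: %s" % manual_soft_vote)
print("VotingClassifier:        %s" % eclf.predict_proba(X[:1])[0])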
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
    a matrix with a geometric progression in each row is named for
    Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print(H[::-1]) # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
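Examples
--------
A small usage example (mirroring the `tril_indices` example above; the
helper array `a` is purely illustrative):
>>> a = np.arange(16).reshape(4, 4)
>>> il = np.tril_indices_from(a)
>>> a[il]
array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])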
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
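Examples
--------
A small usage example (mirroring the `triu_indices` example above; the
helper array `a` is purely illustrative):
>>> a = np.arange(16).reshape(4, 4)
>>> iu = np.triu_indices_from(a)
>>> a[iu]
array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])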
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| mit |
soulmachine/scikit-learn | sklearn/kernel_approximation.py | 3 | 16954 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
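Examples
--------
A minimal usage sketch on toy data; only the output shape is shown, since
the feature values depend on the random projection:
>>> import numpy as np
>>> from sklearn.kernel_approximation import RBFSampler
>>> X = np.array([[0, 0], [1, 1], [1, 0], [0, 1]])
>>> rbf_feature = RBFSampler(gamma=1, random_state=1)
>>> X_features = rbf_feature.fit_transform(X)
>>> X_features.shape
(4, 100)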
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
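Examples
--------
A minimal usage sketch on toy data; only the output shape is shown:
>>> import numpy as np
>>> from sklearn.kernel_approximation import SkewedChi2Sampler
>>> X = np.array([[0., 0.], [1., 1.], [1., 0.], [0., 1.]])
>>> chi2_feature = SkewedChi2Sampler(skewedness=.01, n_components=10)
>>> X_features = chi2_feature.fit_transform(X)
>>> X_features.shape
(4, 10)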
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Uses sampling of the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://eprints.pascal-network.org/archive/00006964/01/vedaldi10.pdf>`_
Vedaldi, A. and Zisserman, A., Computer Vision and Pattern Recognition 2010
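Examples
--------
A minimal usage sketch on non-negative toy data; only the output shape is
shown (3 input features * (2*sample_steps - 1) = 9 output features):
>>> import numpy as np
>>> from sklearn.kernel_approximation import AdditiveChi2Sampler
>>> X = np.abs(np.random.RandomState(0).randn(4, 3))
>>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
>>> X_transformed = chi2sampler.fit_transform(X)
>>> X_transformed.shape
(4, 9)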
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps - 1))
Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
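Examples
--------
A minimal usage sketch with the default RBF kernel; only the output shape
is shown:
>>> import numpy as np
>>> from sklearn.kernel_approximation import Nystroem
>>> X = np.random.RandomState(0).randn(10, 4)
>>> feature_map = Nystroem(gamma=.2, n_components=5, random_state=0)
>>> X_transformed = feature_map.fit_transform(X)
>>> X_transformed.shape
(10, 5)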
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
rnd = check_random_state(self.random_state)
if not sp.issparse(X):
X = np.asarray(X)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
| bsd-3-clause |
hainm/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
edocappelli/oasys-crystalpy | diff_pat.py | 1 | 3239 |
def plot_crystal_sketch(v0_h,v0_v,vH_h ,vH_v ,H_h ,H_v):
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
hshift = 0.2
vshift = 0.0
plt.figure(1, figsize=(6,6))
ax = plt.subplot(111)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
# draw axes
plt.xlim([-2.2,2.2])
plt.ylim([-2.2,2.2])
ax.annotate("",
xy =(0.0,0.0), xycoords='data',
xytext=(2.0,0.0), textcoords='data',
arrowprops=dict(arrowstyle="<-",connectionstyle="arc3"),)
plt.text(2, 0,"$x_2$", color='k')
ax.annotate("",
xy =(0.0,0.0), xycoords='data',
xytext=(0.0,2.0), textcoords='data',
arrowprops=dict(arrowstyle="<-",connectionstyle="arc3"),)
plt.text(0, 2,"$x_3$", color='k')
# draw vectors
ax.annotate("",
xy =(-v0_h,-v0_v), xycoords='data',
xytext=(0.0,0.0), textcoords='data',
arrowprops=dict(arrowstyle="<-",connectionstyle="arc3",color='red'),)
plt.text(-v0_h+hshift,-v0_v+vshift, r"$\vec k_0$", color='r')
ax.annotate("",
xy =(0,0), xycoords='data',
xytext=(vH_h,vH_v), textcoords='data',
arrowprops=dict(arrowstyle="<-",connectionstyle="arc3",color='red'),)
plt.text(vH_h+hshift,vH_v+vshift, r"$\vec k_H$", color='r')
ax.annotate("",
xy =(0,0), xycoords='data',
xytext=(H_h,H_v), textcoords='data',
arrowprops=dict(arrowstyle="<-",connectionstyle="arc3",color='blue'),)
plt.text(H_h+hshift,H_v+vshift, r"$\vec H$", color='b')
# draw Bragg plane
ax.annotate("",
xy =(0,0), xycoords='data',
xytext=( -H_v*1.5, H_h*1.5), textcoords='data',
arrowprops=dict(arrowstyle="-",connectionstyle="arc3",color='green'),)
ax.annotate("",
xy =(0,0), xycoords='data',
xytext=(H_v*1.5,-H_h*1.5), textcoords='data',
arrowprops=dict(arrowstyle="-",connectionstyle="arc3",color='green'),)
# draw crystal
#
x1 = -0.8
y1 = -0.1
x2 = 0.8
y2 = 0.0
verts = [
(x1,y1), # left, bottom
(x2,y1), # left, top
(x2,y2), # right, top
(x1,y2), # right, bottom
(x1,y1), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='orange', lw=2)
ax.add_patch(patch)
plt.show()
if __name__ == "__main__":
# All vectors are normalized
v0_h = 0.92515270745695932
v0_v = -0.37959513680375029
vH_h = 0.99394445110430663
vH_v = 0.10988370269953050
H_h = 0.13917309988899462
H_v = 0.99026806889209951
plot_crystal_sketch(v0_h,v0_v,vH_h ,vH_v ,H_h ,H_v)
| mit |
rkmaddox/mne-python | tutorials/preprocessing/25_background_filtering.py | 3 | 48286 | # -*- coding: utf-8 -*-
r"""
.. _disc-filtering:
===================================
Background information on filtering
===================================
Here we give some background information on filtering in general, and
how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in
Parks & Burrus (1987) :footcite:`ParksBurrus1987`
and Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`,
and for filtering in an M/EEG context we recommend reading
Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`.
.. note::
This tutorial goes pretty deep into the mathematics of filtering and the
design decisions that go into choosing a filter. If you just want to know
how to apply the default filters in MNE-Python to your data, skip this
tutorial and read :ref:`tut-filter-resample` instead (but someday, you
should come back and read this one too 🙂).
Problem statement
=================
Practical issues with filtering electrophysiological data are covered
in Widmann *et al.* (2012) :footcite:`WidmannSchroger2012`, where they
conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011)
:footcite:`VanRullen2011`.
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase signal-to-noise ratio (SNR), but if it
is not used carefully, it can distort data. Here we hope to cover some
filtering basics so users can better understand filtering trade-offs and why
MNE-Python has chosen particular defaults.
.. _tut_filtering_basics:
Filtering basics
================
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
.. math::
H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + \ldots + b_M z^{-M}}
{1 + a_1 z^{-1} + a_2 z^{-2} + \ldots + a_N z^{-M}} \\
&= \frac{\sum_{k=0}^Mb_kz^{-k}}{\sum_{k=1}^Na_kz^{-k}}
In the time domain, the numerator coefficients :math:`b_k` and denominator
coefficients :math:`a_k` can be used to obtain our output data
:math:`y(n)` in terms of our input data :math:`x(n)` as:
.. math::
:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + \ldots + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - \ldots - a_N y(n - N)\\
&= \sum_{k=0}^M b_k x(n-k) - \sum_{k=1}^N a_k y(n-k)
In other words, the output at time :math:`n` is determined by a sum over
1. the numerator coefficients :math:`b_k`, which get multiplied by
the previous input values :math:`x(n-k)`, and
2. the denominator coefficients :math:`a_k`, which get multiplied by
the previous output values :math:`y(n-k)`.
Note that these summations correspond to (1) a weighted `moving average`_ and
(2) an autoregression_.
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output
value of :math:`y(n)` depends only on the :math:`M` previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses.
As outlined in Parks & Burrus (1987) :footcite:`ParksBurrus1987`,
FIR and IIR have different trade-offs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
accumulating error (due to its recursive calculations).
In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.*
(2015) :footcite:`WidmannEtAl2015`:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required
(Ifeachor and Jervis, 2002 :footcite:`IfeachorJervis2002`, p. 321)...
FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always trade-offs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency trade-off, and it will
show up below.
FIR Filters
===========
First, we will focus on FIR filters, which are the default filters used by
MNE-Python.
"""
###############################################################################
# Designing FIR filters
# ---------------------
# Here we'll try to design a low-pass filter and look at trade-offs in terms
# of time- and frequency-domain filter characteristics. Later, in
# :ref:`tut_effect_on_signals`, we'll look at how such filters can affect
# signals when they are used.
#
# First let's import some useful tools for filtering, and set some default
# values for our data that are reasonable for M/EEG.
import numpy as np
from numpy.fft import fft, fftfreq
from scipy import signal
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import mne
sfreq = 1000.
f_p = 40.
flim = (1., sfreq / 2.) # limits for plotting
###############################################################################
# Take for example an ideal low-pass filter, which would give a magnitude
# response of 1 in the pass-band (up to frequency :math:`f_p`) and a magnitude
# response of 0 in the stop-band (down to frequency :math:`f_s`) such that
# :math:`f_p=f_s=40` Hz here (shown to a lower limit of -60 dB for simplicity):
nyq = sfreq / 2. # the Nyquist frequency is half our sample rate
freq = [0, f_p, f_p, nyq]
gain = [1, 1, 0, 0]
third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.]
ax = plt.subplots(1, figsize=third_height)[1]
plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
###############################################################################
# This filter hypothetically achieves zero ripple in the frequency domain,
# perfect attenuation, and perfect steepness. However, due to the discontinuity
# in the frequency response, the filter would require infinite ringing in the
# time domain (i.e., infinite order) to be realized. Another way to think of
# this is that a rectangular window in the frequency domain is actually a sinc_
# function in the time domain, which requires an infinite number of samples
# (and thus infinite time) to represent. So although this filter has ideal
# frequency suppression, it has poor time-domain characteristics.
#
# Let's try to naïvely make a brick-wall filter of length 0.1 s, and look
# at the filter itself in the time domain and the frequency domain:
n = int(round(0.1 * sfreq))
n -= n % 2 - 1 # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 s)', flim=flim, compensate=True)
###############################################################################
# This is not so good! Making the filter 10 times longer (1 s) gets us a
# slightly better stop-band suppression, but still has a lot of ringing in
# the time domain. Note the x-axis is an order of magnitude longer here,
# and the filter has a correspondingly much longer group delay (again equal
# to half the filter length, or 0.5 seconds):
n = int(round(1. * sfreq))
n -= n % 2 - 1 # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 s)', flim=flim, compensate=True)
###############################################################################
# Let's make the stop-band tighter still with a longer filter (10 s),
# with a resulting larger x-axis:
n = int(round(10. * sfreq))
n -= n % 2 - 1 # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 s)', flim=flim, compensate=True)
###############################################################################
# Now we have very sharp frequency suppression, but our filter rings for the
# entire 10 seconds. So this naïve method is probably not a good way to build
# our low-pass filter.
#
# Fortunately, there are multiple established methods to design FIR filters
# based on desired response characteristics. These include:
#
# 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
# 2. Windowed FIR design (:func:`scipy.signal.firwin2`,
# :func:`scipy.signal.firwin`, and `MATLAB fir2`_)
# 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
# 4. Frequency-domain design (construct filter in Fourier
# domain and use an :func:`IFFT <numpy.fft.ifft>` to invert it)
#
# .. note:: Remez and least squares designs have advantages when there are
# "do not care" regions in our frequency response. However, we want
# well controlled responses in all frequency regions.
# Frequency-domain construction is good when an arbitrary response
# is desired, but generally less clean (due to sampling issues) than
# a windowed approach for more straightforward filter applications.
# Since our filters (low-pass, high-pass, band-pass, band-stop)
# are fairly simple and we require precise control of all frequency
# regions, we will primarily use and explore windowed FIR design.
#
# If we relax our frequency-domain filter requirements a little bit, we can
# use these functions to construct a lowpass filter that instead has a
# *transition band*, or a region between the pass frequency :math:`f_p`
# and stop frequency :math:`f_s`, e.g.:
trans_bandwidth = 10 # 10 Hz transition band
f_s = f_p + trans_bandwidth # = 50 Hz
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=third_height)[1]
title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth)
plot_ideal_filter(freq, gain, ax, title=title, flim=flim)
###############################################################################
# Accepting a shallower roll-off of the filter in the frequency domain makes
# our time-domain response potentially much better. We end up with a more
# gradual slope through the transition region, but a *much* cleaner time
# domain signal. Here again for the 1 s filter:
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (1.0 s)',
flim=flim, compensate=True)
###############################################################################
# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
# use a shorter filter (5 cycles at 10 Hz = 0.5 s) and still get acceptable
# stop-band attenuation:
n = int(round(sfreq * 0.5)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.5 s)',
flim=flim, compensate=True)
###############################################################################
# But if we shorten the filter too much (2 cycles of 10 Hz = 0.2 s),
# our effective stop frequency gets pushed out past 60 Hz:
n = int(round(sfreq * 0.2)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.2 s)',
flim=flim, compensate=True)
###############################################################################
# If we want a filter that is only 0.1 seconds long, we should probably use
# something more like a 25 Hz transition band (0.2 s = 5 cycles @ 25 Hz):
trans_bandwidth = 25
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 50 Hz transition (0.2 s)',
flim=flim, compensate=True)
###############################################################################
# So far, we have only discussed *non-causal* filtering, which means that each
# sample at each time point :math:`t` is filtered using samples that come
# after (:math:`t + \Delta t`) *and* before (:math:`t - \Delta t`) the current
# time point :math:`t`.
# In this sense, each sample is influenced by samples that come both before
# and after it. This is useful in many cases, especially because it does not
# delay the timing of events.
#
# However, sometimes it can be beneficial to use *causal* filtering,
# whereby each sample :math:`t` is filtered only using time points that came
# after it.
#
# Note that the delay is variable (whereas for linear/zero-phase filters it
# is constant) but small in the pass-band. Unlike zero-phase filters, which
# require time-shifting backward the output of a linear-phase filtering stage
# (and thus becoming non-causal), minimum-phase filters do not require any
# compensation to achieve small delays in the pass-band. Note that as an
# artifact of the minimum phase filter construction step, the filter does
# not end up being as steep as the linear/zero-phase version.
#
# We can construct a minimum-phase filter from our existing linear-phase
# filter with the :func:`scipy.signal.minimum_phase` function, and note
# that the falloff is not as steep:
h_min = signal.minimum_phase(h)
plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim)
###############################################################################
# .. _tut_effect_on_signals:
#
# Applying FIR filters
# --------------------
#
# Now lets look at some practical effects of these filters by applying
# them to some data.
#
# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
# plus noise (random and line). Note that the original clean signal contains
# frequency content in both the pass band and transition bands of our
# low-pass filter.
dur = 10.
center = 2.
morlet_freq = f_p
tlim = [center - 0.2, center + 0.2]
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]
x = np.zeros(int(sfreq * dur) + 1)
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2
x[n_onset:n_onset + len(blip)] += blip
x_orig = x.copy()
rng = np.random.RandomState(0)
x += rng.randn(len(x)) / 1000.
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.
###############################################################################
# Filter it with a shallow cutoff, linear-phase FIR (which allows us to
# compensate for the constant filter delay):
transition_band = 0.25 * f_p
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
fir_design='firwin', verbose=True)
x_v16 = np.convolve(h, x)
# this is the linear->zero phase, causal-to-non-causal conversion / shift
x_v16 = x_v16[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.16 default', flim=flim,
compensate=True)
###############################################################################
# Filter it with a different design method ``fir_design="firwin2"``, and also
# compensate for the constant filter delay. This method does not produce
# quite as sharp a transition compared to ``fir_design="firwin"``, despite
# being twice as long:
transition_band = 0.25 * f_p
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
# filter_dur = 6.6 / transition_band # sec
# n = int(sfreq * filter_dur)
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
fir_design='firwin2', verbose=True)
x_v14 = np.convolve(h, x)[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim,
compensate=True)
###############################################################################
# Let's also filter with the MNE-Python 0.13 default, which is a
# long-duration, steep cutoff FIR that gets applied twice:
transition_band = 0.5 # Hz
f_s = f_p + transition_band
filter_dur = 10. # sec
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent
# n = int(sfreq * filter_dur)
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
h_trans_bandwidth=transition_band,
filter_length='%ss' % filter_dur,
fir_design='firwin2', verbose=True)
x_v13 = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
# the effective h is one that is applied to the time-reversed version of itself
h_eff = np.convolve(h, h[::-1])
plot_filter(h_eff, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim,
compensate=True)
###############################################################################
# Let's also filter it with the MNE-C default, which is a long-duration
# steep-slope FIR filter designed using frequency-domain techniques:
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]
transition_band = 5 # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim, compensate=True)
###############################################################################
# And now an example of a minimum-phase filter:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
phase='minimum', fir_design='firwin',
verbose=True)
x_min = np.convolve(h, x)
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim)
###############################################################################
# Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
# attenuation, but it comes at a cost of potential
# ringing (long-lasting ripples) in the time domain. Ringing can occur with
# steep filters, especially in signals with frequency content around the
# transition band. Our Morlet wavelet signal has power in our transition band,
# and the time-domain ringing is thus more pronounced for the steep-slope,
# long-duration filter than the shorter, shallower-slope filter:
axes = plt.subplots(1, 2)[1]
def plot_signal(x, offset):
"""Plot a signal."""
t = np.arange(len(x)) / sfreq
axes[0].plot(t, x + offset)
axes[0].set(xlabel='Time (s)', xlim=t[[0, -1]])
X = fft(x)
freqs = fftfreq(len(x), 1. / sfreq)
mask = freqs >= 0
X = X[mask]
freqs = freqs[mask]
axes[1].plot(freqs, 20 * np.log10(np.maximum(np.abs(X), 1e-16)))
axes[1].set(xlim=flim)
yscale = 30
yticklabels = ['Original', 'Noisy', 'FIR-firwin (0.16)', 'FIR-firwin2 (0.14)',
'FIR-steep (0.13)', 'FIR-steep (MNE-C)', 'Minimum-phase']
yticks = -np.arange(len(yticklabels)) / yscale
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_v16, offset=yticks[2])
plot_signal(x_v14, offset=yticks[3])
plot_signal(x_v13, offset=yticks[4])
plot_signal(x_mne_c, offset=yticks[5])
plot_signal(x_min, offset=yticks[6])
axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-len(yticks) / yscale, 1. / yscale],
yticks=yticks, yticklabels=yticklabels)
for text in axes[0].get_yticklabels():
text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
mne.viz.tight_layout()
plt.show()
###############################################################################
# IIR filters
# ===========
#
# MNE-Python also offers IIR filtering functionality that is based on the
# methods from :mod:`scipy.signal`. Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few filter orders,
# i.e., a few different number of coefficients used and therefore steepness
# of the filter:
#
# .. note:: Notice that the group delay (which is related to the phase) of
# the IIR filters below are not constant. In the FIR case, we can
# design so-called linear-phase filters that have a constant group
# delay, and thus compensate for the delay (making the filter
# non-causal) if necessary. This cannot be done with IIR filters, as
# they have a non-linear phase (non-constant group delay). As the
# filter order increases, the phase distortion near and in the
# transition band worsens. However, if non-causal (forward-backward)
# filtering can be used, e.g. with :func:`scipy.signal.filtfilt`,
# these phase issues can theoretically be mitigated.
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim,
compensate=True)
x_shallow = signal.sosfiltfilt(sos, x)
del sos
###############################################################################
# The falloff of this filter is not very steep.
#
# .. note:: Here we have made use of second-order sections (SOS)
# by using :func:`scipy.signal.sosfilt` and, under the
# hood, :func:`scipy.signal.zpk2sos` when passing the
# ``output='sos'`` keyword argument to
# :func:`scipy.signal.iirfilter`. The filter definitions
# given :ref:`above <tut_filtering_basics>` use the polynomial
# numerator/denominator (sometimes called "tf") form ``(b, a)``,
# which are theoretically equivalent to the SOS form used here.
# In practice, however, the SOS form can give much better results
# due to issues with numerical precision (see
# :func:`scipy.signal.sosfilt` for an example), so SOS should be
# used whenever possible.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response. Let's also switch to using the MNE filter
# design function, which simplifies a few things and gives us some information
# about the resulting filter:
iir_params = dict(order=8, ftype='butter')
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
method='iir', iir_params=iir_params,
verbose=True)
plot_filter(filt, sfreq, freq, gain, 'Butterworth order=8', flim=flim,
compensate=True)
x_steep = signal.sosfiltfilt(filt['sos'], x)
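###############################################################################
# As a brief numerical aside on the SOS note above: for a sufficiently steep,
# high-order design (the 13th-order elliptic filter below is just an arbitrary
# steep example, not one of the filters used elsewhere here), the polynomial
# ``(b, a)`` form can become numerically unstable, while the SOS form of the
# very same filter stays well behaved:
b_demo, a_demo = signal.ellip(13, 0.009, 80, 0.05, output='ba')
sos_demo = signal.ellip(13, 0.009, 80, 0.05, output='sos')
impulse = np.zeros(700)
impulse[0] = 1.
print(np.abs(signal.lfilter(b_demo, a_demo, impulse)).max())  # can blow up
print(np.abs(signal.sosfilt(sos_demo, impulse)).max())  # stays bounded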
###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`. Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:
iir_params.update(ftype='cheby1',
rp=1., # dB of acceptable pass-band ripple
)
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
method='iir', iir_params=iir_params,
verbose=True)
plot_filter(filt, sfreq, freq, gain,
'Chebychev-1 order=8, ripple=1 dB', flim=flim, compensate=True)
###############################################################################
# If we can live with even more ripple, we can get it slightly steeper,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):
iir_params['rp'] = 6.
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
method='iir', iir_params=iir_params,
verbose=True)
plot_filter(filt, sfreq, freq, gain,
'Chebychev-1 order=8, ripple=6 dB', flim=flim,
compensate=True)
###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:
axes = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are non-causal (zero-phase), can
# make activity appear to occur earlier or later than it truly did. As
# shown in VanRullen (2011) :footcite:`VanRullen2011`, commonly (at the time)
# used low-pass filters created artifacts when applied to simulated
# data. However, such deleterious effects were minimal in many real-world
# examples in Rousselet (2012) :footcite:`Rousselet2012`.
#
# Perhaps more revealing, it was noted in Widmann & Schröger (2012)
# :footcite:`WidmannSchroger2012` that the problematic low-pass filters from
# VanRullen (2011) :footcite:`VanRullen2011`:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
# included "do-not-care" transition regions, which can lead to
# uncontrolled behavior.
# 2. Had a filter length that was independent of the transition bandwidth,
# which can cause excessive ringing and signal distortion.
#
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, corner frequencies above 0.1 Hz
# were found in Acunzo *et al.* (2012) :footcite:`AcunzoEtAl2012` to:
#
#    "... generate a systematic bias easily leading to misinterpretations of
#    neural activity."
#
# In a related paper, Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`
# also came to suggest a 0.1 Hz highpass. More evidence followed in
# Tanner *et al.* (2015) :footcite:`TannerEtAl2015` of such distortions.
# In data from language ERP studies of semantic and
# syntactic processing (i.e., N400 and P600), a high-pass above 0.3 Hz
# caused significant effects to be introduced implausibly early when compared
# to the unfiltered data. From this, the authors suggested the optimal
# high-pass value for language processing to be 0.1 Hz.
#
# We can recreate a problematic simulation from
# Tanner *et al.* (2015) :footcite:`TannerEtAl2015`:
#
# "The simulated component is a single-cycle cosine wave with an amplitude
# of 5µV [sic], onset of 500 ms poststimulus, and duration of 800 ms. The
# simulated component was embedded in 20 s of zero values to avoid
# filtering edge effects... Distortions [were] caused by 2 Hz low-pass
# and high-pass filters... No visible distortion to the original
# waveform [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
# Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
# (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
# pass-band, but also within the transition and stop-bands -- perhaps
# most easily understood because the signal has a non-zero DC value,
# but also because it is a shifted cosine that has been
# *windowed* (here multiplied by a rectangular window), which
# makes the cosine and DC frequencies spread to other frequencies
# (multiplication in time is convolution in frequency, so multiplying
# by a rectangular window in the time domain means convolving a sinc
# function with the impulses at DC and the cosine frequency in the
# frequency domain).
#
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2
onset = np.where(t >= 0.5)[0][0]
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)
x[onset:onset + len(sig)] = sig
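# As a quick, illustrative check of the note above, we can look at how much of
# the power of this windowed cosine actually lies above the 2 Hz cutoffs used
# below (the exact number is not important, only that it is non-zero):
freqs_x = np.fft.rfftfreq(len(x), 1. / sfreq)
psd_x = np.abs(np.fft.rfft(x)) ** 2
print('Fraction of power above 2 Hz: %0.4f'
      % (psd_x[freqs_x > 2.].sum() / psd_x.sum()))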
iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axes = plt.subplots(2, 2)[1].ravel()
for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
ax.plot(t, x, color='0.5')
ax.plot(t, x_f, color='k', linestyle='--')
ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
title=title, xlabel=xlabel, ylabel=ylabel)
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Similarly, in a P300 paradigm,
# Kappenman & Luck (2010) :footcite:`KappenmanLuck2010`
# found that applying a 1 Hz high-pass decreased the probability of
# finding a significant difference in the N100 response, likely because
# the P300 response was smeared (and inverted) in time by the high-pass
# filter such that it tended to cancel out the increased N100. However,
# they nonetheless note that some high-passing can still be useful to deal
# with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving the :ref:`sample-dataset` dataset,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* (2015) :footcite:`TannerEtAl2015`
# suggest using baseline correction to remove slow drifts in data. However,
# Maess *et al.* (2016) :footcite:`MaessEtAl2016`
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* (2016) :footcite:`TannerEtAl2016`
# rebutted that baseline correction can correct for problems with filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:
def baseline_plot(x):
all_axes = plt.subplots(3, 2)[1]
for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])):
for ci, ax in enumerate(axes):
if ci == 0:
iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
output='sos')
x_hp = signal.sosfiltfilt(iir_hp, x, padlen=0)
else:
x_hp -= x_hp[t < 0].mean()
ax.plot(t, x, color='0.5')
ax.plot(t, x_hp, color='k', linestyle='--')
if ri == 0:
ax.set(title=('No ' if ci == 0 else '') +
'Baseline Correction')
ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
horizontalalignment='right')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.suptitle(title)
plt.show()
baseline_plot(x)
###############################################################################
# In response, Maess *et al.* (2016) :footcite:`MaessEtAl2016a`
# note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
# are for a single simulated sensor. In multi-electrode recordings
# the topology (i.e., spatial pattern) of the pre-stimulus activity
# will leak into the post-stimulus period. This will likely create a
# spatially varying distortion of the time-domain signals, as the
# averaged pre-stimulus spatial pattern gets subtracted from the
# sensor time courses.
#
# Putting some activity in the baseline period:
n_pre = (t < 0).sum()
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)
###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
# to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
# in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin`.
# In Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
#    "... providing a transition bandwidth of 25% of the lower passband
#    edge but, where possible, not lower than 2 Hz and otherwise the
#    distance from the passband edge to the critical frequency."
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01 | 0.01 | 2.0 |
# +------------------+-------------------+-------------------+
# | 0.1 | 0.1 | 2.0 |
# +------------------+-------------------+-------------------+
# | 1.0 | 1.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 2.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 4.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 8.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 10.0 | 2.5 | 2.5 |
# +------------------+-------------------+-------------------+
# | 20.0 | 5.0 | 5.0 |
# +------------------+-------------------+-------------------+
# | 40.0 | 10.0 | 10.0 |
# +------------------+-------------------+-------------------+
# | 50.0 | 12.5 | 12.5 |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
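#
# As a rough illustration of that 25% rule (a sketch, not the exact internal
# code, and ignoring the additional clamp at the Nyquist frequency; the helper
# name below is made up for this example):
def _approx_auto_trans_bandwidth(passband_edge):
    """Approximate the 'auto' transition bandwidth for a passband edge."""
    return min(max(passband_edge * 0.25, 2.), passband_edge)
print([_approx_auto_trans_bandwidth(f) for f in (0.1, 1., 10., 40.)])
###############################################################################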
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (in samples) is multiplied by 3.1, 3.3, or 5.0 for the Hann, Hamming,
# or Blackman windows, respectively, as selected by the ``fir_window``
# argument for ``fir_design='firwin'``, and double these for
# ``fir_design='firwin2'`` mode.
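#
# Continuing the illustration (again a sketch under the assumptions above, not
# the exact internal code), the 'auto' length for ``fir_design='firwin'`` can
# be approximated as:
_window_factors = dict(hann=3.1, hamming=3.3, blackman=5.0)
def _approx_auto_filter_length(sfreq, shortest_transition, fir_window='hamming'):
    """Approximate the 'auto' FIR filter length (in samples)."""
    n = int(np.ceil(_window_factors[fir_window] * sfreq / shortest_transition))
    return n + 1 if n % 2 == 0 else n  # force an odd length for a symmetric FIR
print(_approx_auto_filter_length(1000., 10.))  # e.g. 40 Hz lowpass, 10 Hz transition
###############################################################################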
#
# .. note:: For ``fir_design='firwin2'``, the multiplicative factors are
# doubled compared to what is given in
# Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`
# (p. 357), as :func:`scipy.signal.firwin2` has a smearing effect
# on the frequency response, which we compensate for by
# increasing the filter length. This is why
#           ``fir_design='firwin'`` is preferred to ``fir_design='firwin2'``.
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
# effectively with fewer samples than the high-pass filter, so
# it is advisable to apply the high-pass and low-pass separately
# when using ``fir_design='firwin2'``. For design mode
# ``fir_design='firwin'``, there is no need to separate the
# operations, as the lowpass and highpass elements are constructed
# separately to meet the transition band requirements.
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on :ref:`tut-filter-resample`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions. Because these
# are used in place of more gradual (e.g., linear) transitions,
# a given transition width will result in more temporal ringing but also more
# rapid attenuation than the same transition width in windowed FIR designs.
#
# The default filter length will generally have excellent attenuation
# but long ringing for the sample rates typically encountered in M/EEG data
# (e.g. 500-2000 Hz).
#
# Defaults in other software
# --------------------------
# A good but possibly outdated comparison of filtering in various software
# packages is available in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`.
# Briefly:
#
# * EEGLAB
# MNE-Python 0.14 defaults to behavior very similar to that of EEGLAB
# (see the `EEGLAB filtering FAQ`_ for more information).
# * FieldTrip
# By default FieldTrip applies a forward-backward Butterworth IIR filter
# of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
# high-pass filters). Similar filters can be achieved in MNE-Python when
# filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>`
# (see also :func:`mne.filter.construct_iir_filter` for options).
# For more information, see e.g. the
# `FieldTrip band-pass documentation <ftbp_>`_.
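#
# As an illustrative sketch (parameters chosen here only for demonstration),
# something loosely comparable to FieldTrip's default low-pass -- an order-4
# Butterworth applied forward and backward -- could be constructed with:
filt_ft_like = mne.filter.create_filter(
    x, sfreq, l_freq=None, h_freq=f_p, method='iir',
    iir_params=dict(order=4, ftype='butter'), verbose=True)
x_ft_like = signal.sosfiltfilt(filt_ft_like['sos'], x)
###############################################################################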
#
# Reporting Filters
# =================
# On page 45 in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`,
# there is a convenient list of
# important filter parameters that should be reported with each publication:
#
# 1. Filter type (high-pass, low-pass, band-pass, band-stop, FIR, IIR)
# 2. Cutoff frequency (including definition)
# 3. Filter order (or length)
# 4. Roll-off or transition bandwidth
# 5. Passband ripple and stopband attenuation
# 6. Filter delay (zero-phase, linear-phase, non-linear phase) and causality
# 7. Direction of computation (one-pass forward/reverse, or two-pass forward
# and reverse)
#
# In the following, we will address how to deal with these parameters in MNE:
#
#
# Filter type
# -----------
# Depending on the function or method used, the filter type can be specified.
# To name an example, in :func:`mne.filter.create_filter`, the relevant
# arguments would be ``l_freq``, ``h_freq``, ``method``, and if the method is
# FIR ``fir_window`` and ``fir_design``.
#
#
# Cutoff frequency
# ----------------
# The cutoff of FIR filters in MNE is defined as half-amplitude cutoff in the
# middle of the transition band. That is, if you construct a lowpass FIR filter
# with ``h_freq = 40``, the filter function will provide a transition
# bandwidth that depends on the ``h_trans_bandwidth`` argument. The desired
# half-amplitude cutoff of the lowpass FIR filter is then at
# ``h_freq + transition_bandwidth/2.``.
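#
# For example (a sketch of the arithmetic, assuming the 'auto' transition
# bandwidth discussed above), a 40 Hz low-pass with a 10 Hz transition band
# has its half-amplitude point at 45 Hz:
h_freq_example = 40.
h_trans_bandwidth_example = min(max(0.25 * h_freq_example, 2.), h_freq_example)
print('Half-amplitude cutoff: %0.1f Hz'
      % (h_freq_example + h_trans_bandwidth_example / 2.))
###############################################################################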
#
# Filter length (order) and transition bandwidth (roll-off)
# ---------------------------------------------------------
# In the :ref:`tut_filtering_in_python` section, we have already talked about
# the default filter lengths and transition bandwidths that are used when no
# custom values are specified using the respective filter function's arguments.
#
# If you want to find out about the filter length and transition bandwidth that
# were used through the 'auto' setting, you can use
# :func:`mne.filter.create_filter` to print out the settings once more:
# Use the same settings as when calling e.g., `raw.filter()`
fir_coefs = mne.filter.create_filter(
data=None, # data is only used for sanity checking, not strictly needed
sfreq=1000., # sfreq of your data in Hz
l_freq=None,
h_freq=40., # assuming a lowpass of 40 Hz
method='fir',
fir_window='hamming',
fir_design='firwin',
verbose=True)
# See the printed log for the transition bandwidth and filter length.
# Alternatively, get the filter length through:
filter_length = fir_coefs.shape[0]
###############################################################################
# .. note:: If you are using an IIR filter, :func:`mne.filter.create_filter`
# will not print a filter length and transition bandwidth to the log.
# Instead, you can specify the roll-off with the ``iir_params``
# argument or stay with the default, which is a fourth order
# (Butterworth) filter.
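#
# As an illustrative check (not something you need to do in practice), you can
# create an IIR filter with these defaults and inspect the returned
# second-order sections to recover the designed order (two per section):
iir_default = mne.filter.create_filter(
    data=None, sfreq=1000., l_freq=None, h_freq=40., method='iir',
    verbose=True)
print('Sections: %d (designed order %d)'
      % (len(iir_default['sos']), 2 * len(iir_default['sos'])))
###############################################################################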
#
# Passband ripple and stopband attenuation
# ----------------------------------------
#
# When using the standard :func:`scipy.signal.firwin` design (as for FIR filters
# in MNE), the passband ripple and stopband attenuation depend on the
# window used in the design. For standard windows the values are listed in this
# table (see Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`, p. 357):
#
# +-------------------------+-----------------+----------------------+
# | Name of window function | Passband ripple | Stopband attenuation |
# +=========================+=================+======================+
# | Hann | 0.0545 dB | 44 dB |
# +-------------------------+-----------------+----------------------+
# | Hamming | 0.0194 dB | 53 dB |
# +-------------------------+-----------------+----------------------+
# | Blackman | 0.0017 dB | 74 dB |
# +-------------------------+-----------------+----------------------+
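#
# As a rough empirical check of the Hamming row above (illustrative only), we
# can design a Hamming-windowed FIR low-pass with :func:`scipy.signal.firwin`
# and measure its worst-case stop-band gain, which should come out near -53 dB:
h_check = signal.firwin(101, 0.25, window='hamming')  # cutoff at 0.25 x Nyquist
w_check2, resp_check = signal.freqz(h_check, worN=8000)
stop_mask = w_check2 / np.pi > 0.35  # comfortably past the transition region
print('Worst stop-band gain: %0.1f dB'
      % (20 * np.log10(np.max(np.abs(resp_check[stop_mask])))))
###############################################################################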
#
#
# Filter delay and direction of computation
# -----------------------------------------
# For reporting this information, it might be sufficient to read the docstring
# of the filter function or method that you apply. For example in the
# docstring of `mne.filter.create_filter`, for the phase parameter it says:
#
# Phase of the filter, only used if ``method='fir'``.
# By default, a symmetric linear-phase FIR filter is constructed.
# If ``phase='zero'`` (default), the delay of this filter
# is compensated for. If ``phase=='zero-double'``, then this filter
# is applied twice, once forward, and once backward. If 'minimum',
# then a minimum-phase, causal filter will be used.
#
#
# Summary
# =======
#
# When filtering, there are always trade-offs that should be considered.
# One important trade-off is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
# .. footbibliography::
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: https://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: https://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: https://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _ftbp: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter
| bsd-3-clause |
PTDreamer/dRonin | python/calibration/mag_calibration.py | 4 | 4543 | #!/usr/bin/python
from numpy import *
from matplotlib.pylab import *
def mag_calibration(mag,gyros=None,LH=200,LV=500):
""" Calibrates the magnetometer data by fitting it to a sphere,
ideally when constantly turning to spread the data around that
sphere somewhat evenly (or at least in a horizontal plane)"""
import numpy
from scipy.optimize import minimize
from numpy.core.multiarray import arange
def find_spinning(mag,gyros):
""" return the indicies in the magnetometer data when
the gyro indicates it is spinning on the z axis """
import scipy.signal
from matplotlib.mlab import find
threshold = 40
spinning = scipy.signal.medfilt(abs(gyros['z'][:,0]),kernel_size=5) > threshold
# make sure to always find end elements
spinning = numpy.concatenate((numpy.array([False]),spinning,numpy.array([False])))
start = find(spinning[1:] & ~spinning[0:-1])
stop = find(~spinning[1:] & spinning[0:-1])-1
tstart = gyros['time'][start]
tstop = gyros['time'][stop]
idx = numpy.zeros((0),dtype=int)
for i in arange(tstart.size):
i1 = abs(mag['time']-tstart[i]).argmin()
i2 = abs(mag['time']-tstop[i]).argmin()
idx = numpy.concatenate((idx,arange(i1,i2,dtype=int)))
return idx
if gyros is not None:
idx = find_spinning(mag,gyros)
else:
idx = arange(mag['time'].size)
mag_x = mag['x'][idx,0]
mag_y = mag['y'][idx,0]
mag_z = mag['z'][idx,0]
rx = max(mag_x) - min(mag_x)
ry = max(mag_y) - min(mag_y)
mx = rx / 2 + min(mag_x)
my = ry / 2 + min(mag_y)
def distortion(x,mag_x=mag_x,mag_y=mag_y,mag_z=mag_z,LH=LH,LV=LV):
""" loss function for distortion from spherical data """
from numpy import sqrt, mean
cor_x = mag_x * x[0] - x[3]
cor_y = mag_y * x[1] - x[4]
cor_z = mag_z * x[2] - x[5]
l = sqrt(cor_x**2 + cor_y**2 + cor_z**2)
L0 = sqrt(LH**2 + LV**2)
spherical_error = numpy.mean((l - L0)**2)
# note that ideally the horizontal error would be calculated
# after correcting for attitude but that requires high temporal
        # accuracy from attitude, which we don't want to require. This
# works well in practice.
lh = sqrt(cor_x**2 + cor_y**2)
err = (lh - LH)**2
horizontal_error = numpy.mean(err)
# weight both the spherical error and the horizontal error
# components equally
return spherical_error+horizontal_error
cons = ({'type': 'ineq', 'fun' : lambda x: numpy.array([x[0] - 0.5])},
{'type': 'ineq', 'fun' : lambda x: numpy.array([x[1] - 0.5])},
{'type': 'ineq', 'fun' : lambda x: numpy.array([x[2] - 0.5])})
opts = {'xtol': 1e-8, 'disp': False, 'maxiter': 10000}
# method of COBYLA also works well
x0 = numpy.array([1, 1, 1, numpy.mean(mag_x), numpy.mean(mag_y), numpy.mean(mag_z)])
res = minimize(distortion, x0, method='COBYLA', options=opts, constraints=cons)
x = res.x
cor_x = mag_x * x[0] - x[3]
cor_y = mag_y * x[1] - x[4]
cor_z = mag_z * x[2] - x[5]
import matplotlib
from numpy import sqrt
matplotlib.pyplot.subplot(1,2,1)
matplotlib.pyplot.plot(cor_x,cor_y,'.',cor_x,cor_z,'.',cor_z,cor_y,'.')
#matplotlib.pyplot.xlim(-1,1)
#matplotlib.pyplot.ylim(-1,1)
matplotlib.pyplot.subplot(1,2,2)
matplotlib.pyplot.plot(sqrt(cor_x**2+cor_y**2+cor_z**2))
return res, cor_x, cor_y, cor_z
def main():
import sys, os
sys.path.insert(1, os.path.dirname(sys.path[0]))
from dronin import telemetry
uavo_list = telemetry.get_telemetry_by_args()
from dronin.uavo import UAVO_Magnetometer, UAVO_Gyros
print mag_calibration(uavo_list.as_numpy_array(UAVO_Magnetometer), uavo_list.as_numpy_array(UAVO_Gyros))
# Wait for user to close window.
matplotlib.pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
NicovincX2/Python-3.5 | Algèbre/Algèbre linéaire/Algèbre multilinéaire/slice3.py | 1 | 7017 | # -*- coding: utf-8 -*-
"""
slice3.py - plot 3D data on a uniform tensor-product grid as a set of
three adjustable xy, yz, and xz plots
Copyright (c) 2013 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Created on Wed Dec 4 11:24:14 MST 2013
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
def meshgrid3(x, y, z):
""" Create a three-dimensional meshgrid """
nx = len(x)
ny = len(y)
nz = len(z)
xx = np.swapaxes(np.reshape(np.tile(x, (1, ny, nz)), (nz, ny, nx)), 0, 2)
yy = np.swapaxes(np.reshape(np.tile(y, (nx, 1, nz)), (nx, nz, ny)), 1, 2)
zz = np.tile(z, (nx, ny, 1))
return xx, yy, zz
class DiscreteSlider(Slider):
"""A matplotlib slider widget with discrete steps.
Created by Joe Kington and submitted to StackOverflow on Dec 1 2012
http://stackoverflow.com/questions/13656387/can-i-make-matplotlib-sliders-more-discrete
"""
def __init__(self, *args, **kwargs):
"""Identical to Slider.__init__, except for the "increment" kwarg.
"increment" specifies the step size that the slider will be discritized
to."""
self.inc = kwargs.pop('increment', 1)
Slider.__init__(self, *args, **kwargs)
def set_val(self, val):
xy = self.poly.xy
xy[2] = val, 1
xy[3] = val, 0
self.poly.xy = xy
# Suppress slider label
self.valtext.set_text('')
if self.drawon:
self.ax.figure.canvas.draw()
self.val = val
if not self.eventson:
return
        for cid, func in self.observers.items():
func(val)
class slice3(object):
def __init__(self, xx, yy, zz, u):
self.x = xx[:, 0, 0]
self.y = yy[0, :, 0]
self.z = zz[0, 0, :]
self.data = u
self.fig = plt.figure(1, (20, 7))
self.ax1 = self.fig.add_subplot(131, aspect='equal')
self.ax2 = self.fig.add_subplot(132, aspect='equal')
self.ax3 = self.fig.add_subplot(133, aspect='equal')
self.xplot_zline = self.ax1.axvline(color='m', linestyle='--', lw=2)
self.xplot_zline.set_xdata(self.z[0])
self.xplot_yline = self.ax1.axhline(color='m', linestyle='--', lw=2)
self.xplot_yline.set_ydata(self.y[0])
self.yplot_xline = self.ax2.axhline(color='m', linestyle='--', lw=2)
self.yplot_xline.set_ydata(self.x[0])
self.yplot_zline = self.ax2.axvline(color='m', linestyle='--', lw=2)
self.yplot_zline.set_xdata(self.z[0])
self.zplot_xline = self.ax3.axvline(color='m', linestyle='--', lw=2)
self.zplot_xline.set_xdata(self.x[0])
self.zplot_yline = self.ax3.axhline(color='m', linestyle='--', lw=2)
self.zplot_yline.set_ydata(self.y[0])
self.xslice = self.ax1.imshow(u[0, :, :], extent=(
self.z[0], self.z[-1], self.y[0], self.y[-1]))
self.yslice = self.ax2.imshow(u[:, 0, :], extent=(
self.z[0], self.z[-1], self.x[0], self.x[-1]))
self.zslice = self.ax3.imshow(u[:, :, 0], extent=(
self.x[0], self.x[-1], self.y[0], self.y[-1]))
# Create and initialize x-slider
self.sliderax1 = self.fig.add_axes([0.125, 0.08, 0.225, 0.03])
self.sliderx = DiscreteSlider(
self.sliderax1, '', 0, len(self.x) - 1, increment=1, valinit=0)
self.sliderx.on_changed(self.update_x)
self.sliderx.set_val(0)
# Create and initialize y-slider
self.sliderax2 = self.fig.add_axes([0.4, 0.08, 0.225, 0.03])
self.slidery = DiscreteSlider(
self.sliderax2, '', 0, len(self.y) - 1, increment=1, valinit=0)
self.slidery.on_changed(self.update_y)
self.slidery.set_val(0)
# Create and initialize z-slider
self.sliderax3 = self.fig.add_axes([0.675, 0.08, 0.225, 0.03])
self.sliderz = DiscreteSlider(
self.sliderax3, '', 0, len(self.z) - 1, increment=1, valinit=0)
self.sliderz.on_changed(self.update_z)
self.sliderz.set_val(0)
z0, z1 = self.ax1.get_xlim()
x0, x1 = self.ax2.get_ylim()
y0, y1 = self.ax1.get_ylim()
self.ax1.set_aspect((z1 - z0) / (y1 - y0))
self.ax2.set_aspect((z1 - z0) / (x1 - x0))
self.ax3.set_aspect((x1 - x0) / (y1 - y0))
def xlabel(self, *args, **kwargs):
self.ax2.set_ylabel(*args, **kwargs)
self.ax3.set_xlabel(*args, **kwargs)
def ylabel(self, *args, **kwargs):
self.ax1.set_ylabel(*args, **kwargs)
self.ax3.set_ylabel(*args, **kwargs)
def zlabel(self, *args, **kwargs):
self.ax1.set_xlabel(*args, **kwargs)
self.ax2.set_xlabel(*args, **kwargs)
def update_x(self, value):
self.xslice.set_data(self.data[value, :, :])
self.yplot_xline.set_ydata(self.x[value])
self.zplot_xline.set_xdata(self.x[value])
def update_y(self, value):
self.yslice.set_data(self.data[:, value, :])
self.xplot_yline.set_ydata(self.y[value])
self.zplot_yline.set_ydata(self.y[value])
def update_z(self, value):
self.zslice.set_data(self.data[:, :, value])
self.xplot_zline.set_xdata(self.z[value])
self.yplot_zline.set_xdata(self.z[value])
def show(self):
plt.show()
if __name__ == '__main__':
# Number of x-grid points
nx = 100
    # Number of y- and z-grid points
ny = 100
nz = 200
x = np.linspace(-4, 4, nx)
y = np.linspace(-4, 4, ny)
z = np.linspace(0, 8, nz)
xx, yy, zz = meshgrid3(x, y, z)
# Display three cross sections of a Gaussian Beam/Paraxial wave
u = np.real(np.exp(-(2 * xx**2 + yy**2) / (.2 + 2j * zz)) /
np.sqrt(.2 + 2j * zz))
s3 = slice3(xx, yy, zz, u)
s3.xlabel('x', fontsize=18)
s3.ylabel('y', fontsize=18)
s3.zlabel('z', fontsize=18)
s3.show()
os.system("pause")
| gpl-3.0 |
jaduimstra/nilmtk | nilmtk/metrics.py | 5 | 13373 | '''Metrics to compare disaggregation performance against ground truth
data.
All metrics functions have the same interface. Each function takes
`predictions` and `ground_truth` parameters. Both of which are
nilmtk.MeterGroup objects. Each function returns one of two types:
either a pd.Series or a single float. Most functions return a
pd.Series where each index element is a meter instance int or a tuple
of ints for MeterGroups.
Notation
--------
Below is the notation used to mathematically define each metric.
:math:`T` - number of time slices.
:math:`t` - a time slice.
:math:`N` - number of appliances.
:math:`n` - an appliance.
:math:`y^{(n)}_t` - ground truth power of appliance :math:`n` in time slice :math:`t`.
:math:`\\hat{y}^{(n)}_t` - estimated power of appliance :math:`n` in time slice :math:`t`.
:math:`x^{(n)}_t` - ground truth state of appliance :math:`n` in time slice :math:`t`.
:math:`\\hat{x}^{(n)}_t` - estimated state of appliance :math:`n` in time slice :math:`t`.
Functions
---------
'''
from __future__ import print_function, division
import numpy as np
import pandas as pd
import math
from .metergroup import MeterGroup, iterate_through_submeters_of_two_metergroups
from .electric import align_two_meters
def error_in_assigned_energy(predictions, ground_truth):
"""Compute error in assigned energy.
.. math::
error^{(n)} =
\\left | \\sum_t y^{(n)}_t - \\sum_t \\hat{y}^{(n)}_t \\right |
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
errors : pd.Series
Each index is an meter instance int (or tuple for MeterGroups).
Each value is the absolute error in assigned energy for that appliance,
in kWh.
"""
errors = {}
both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
predictions, ground_truth)
for pred_meter, ground_truth_meter in both_sets_of_meters:
sections = pred_meter.good_sections()
ground_truth_energy = ground_truth_meter.total_energy(sections=sections)
predicted_energy = pred_meter.total_energy(sections=sections)
errors[pred_meter.instance()] = np.abs(ground_truth_energy - predicted_energy)
return pd.Series(errors)
def fraction_energy_assigned_correctly(predictions, ground_truth):
'''Compute fraction of energy assigned correctly
.. math::
fraction =
\\sum_n min \\left (
\\frac{\\sum_n y}{\\sum_{n,t} y},
\\frac{\\sum_n \\hat{y}}{\\sum_{n,t} \\hat{y}}
\\right )
Ignores distinction between different AC types, instead if there are
multiple AC types for each meter then we just take the max value across
the AC types.
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
fraction : float in the range [0,1]
Fraction of Energy Correctly Assigned.
'''
predictions_submeters = MeterGroup(meters=predictions.submeters().meters)
ground_truth_submeters = MeterGroup(meters=ground_truth.submeters().meters)
fraction_per_meter_predictions = predictions_submeters.fraction_per_meter()
fraction_per_meter_ground_truth = ground_truth_submeters.fraction_per_meter()
fraction_per_meter_ground_truth.index = fraction_per_meter_ground_truth.index.map(lambda meter: meter.instance)
fraction_per_meter_predictions.index = fraction_per_meter_predictions.index.map(lambda meter: meter.instance)
fraction = 0
for meter_instance in predictions_submeters.instance():
fraction += min(fraction_per_meter_ground_truth[meter_instance],
fraction_per_meter_predictions[meter_instance])
return fraction
def mean_normalized_error_power(predictions, ground_truth):
'''Compute mean normalized error in assigned power
.. math::
error^{(n)} =
\\frac
{ \\sum_t {\\left | y_t^{(n)} - \\hat{y}_t^{(n)} \\right |} }
{ \\sum_t y_t^{(n)} }
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
mne : pd.Series
Each index is an meter instance int (or tuple for MeterGroups).
Each value is the MNE for that appliance.
'''
mne = {}
both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
predictions, ground_truth)
for pred_meter, ground_truth_meter in both_sets_of_meters:
total_abs_diff = 0.0
sum_of_ground_truth_power = 0.0
for aligned_meters_chunk in align_two_meters(pred_meter,
ground_truth_meter):
diff = aligned_meters_chunk.icol(0) - aligned_meters_chunk.icol(1)
total_abs_diff += sum(abs(diff.dropna()))
sum_of_ground_truth_power += aligned_meters_chunk.icol(1).sum()
mne[pred_meter.instance()] = total_abs_diff / sum_of_ground_truth_power
return pd.Series(mne)
def rms_error_power(predictions, ground_truth):
'''Compute RMS error in assigned power
.. math::
error^{(n)} = \\sqrt{ \\frac{1}{T} \\sum_t{ \\left ( y_t - \\hat{y}_t \\right )^2 } }
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
error : pd.Series
Each index is an meter instance int (or tuple for MeterGroups).
Each value is the RMS error in predicted power for that appliance.
'''
error = {}
both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
predictions, ground_truth)
for pred_meter, ground_truth_meter in both_sets_of_meters:
sum_of_squared_diff = 0.0
n_samples = 0
for aligned_meters_chunk in align_two_meters(pred_meter,
ground_truth_meter):
diff = aligned_meters_chunk.icol(0) - aligned_meters_chunk.icol(1)
diff.dropna(inplace=True)
sum_of_squared_diff += (diff ** 2).sum()
n_samples += len(diff)
error[pred_meter.instance()] = math.sqrt(sum_of_squared_diff / n_samples)
return pd.Series(error)
def f1_score(predictions, ground_truth):
'''Compute F1 scores.
.. math::
F_{score}^{(n)} = \\frac
{2 * Precision * Recall}
{Precision + Recall}
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
f1_scores : pd.Series
Each index is an meter instance int (or tuple for MeterGroups).
Each value is the F1 score for that appliance. If there are multiple
chunks then the value is the weighted mean of the F1 score for
each chunk.
'''
# If we import sklearn at top of file then sphinx breaks.
from sklearn.metrics import f1_score as sklearn_f1_score
# sklearn produces lots of DepreciationWarnings with PyTables
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
f1_scores = {}
both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
predictions, ground_truth)
for pred_meter, ground_truth_meter in both_sets_of_meters:
scores_for_meter = pd.DataFrame(columns=['score', 'n_samples'])
for aligned_states_chunk in align_two_meters(pred_meter,
ground_truth_meter,
'when_on'):
aligned_states_chunk.dropna(inplace=True)
aligned_states_chunk = aligned_states_chunk.astype(int)
score = sklearn_f1_score(aligned_states_chunk.icol(0),
aligned_states_chunk.icol(1))
scores_for_meter = scores_for_meter.append(
{'score': score, 'n_samples': len(aligned_states_chunk)},
ignore_index=True)
# Calculate weighted mean
tot_samples = scores_for_meter['n_samples'].sum()
scores_for_meter['proportion'] = (scores_for_meter['n_samples'] /
tot_samples)
avg_score = (scores_for_meter['score'] *
scores_for_meter['proportion']).sum()
f1_scores[pred_meter.instance()] = avg_score
return pd.Series(f1_scores)
##### FUNCTIONS BELOW THIS LINE HAVE NOT YET BEEN CONVERTED TO NILMTK v0.2 #####
"""
def confusion_matrices(predicted_states, ground_truth_states):
'''Compute confusion matrix between appliance states for each appliance
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
dict of type {appliance : confusion matrix}
'''
re = {}
for appliance in predicted_states:
matrix = np.zeros([np.max(ground_truth_states[appliance]) + 1,
np.max(ground_truth_states[appliance]) + 1])
for time in predicted_states[appliance]:
matrix[predicted_states.values[time, appliance],
ground_truth_states.values[time, appliance]] += 1
re[appliance] = matrix
return re
def tp_fp_fn_tn(predicted_states, ground_truth_states):
'''Compute counts of True Positives, False Positives, False Negatives, True Negatives
.. math::
TP^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = on, \\hat{x}^{(n)}_t = on \\right )
FP^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = off, \\hat{x}^{(n)}_t = on \\right )
FN^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = on, \\hat{x}^{(n)}_t = off \\right )
TN^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = off, \\hat{x}^{(n)}_t = off \\right )
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
numpy array where columns represent appliances and rows represent: [TP, FP, FN, TN]
'''
# assumes state 0 = off, all other states = on
predicted_states_on = predicted_states > 0
ground_truth_states_on = ground_truth_states > 0
tp = np.sum(np.logical_and(predicted_states_on.values == True,
ground_truth_states_on.values == True), axis=0)
fp = np.sum(np.logical_and(predicted_states_on.values == True,
ground_truth_states_on.values == False), axis=0)
fn = np.sum(np.logical_and(predicted_states_on.values == False,
ground_truth_states_on.values == True), axis=0)
tn = np.sum(np.logical_and(predicted_states_on.values == False,
ground_truth_states_on.values == False), axis=0)
return np.array([tp, fp, fn, tn]).astype(float)
def tpr_fpr(predicted_states, ground_truth_states):
'''Compute True Positive Rate and False Negative Rate
.. math::
TPR^{(n)} = \\frac{TP}{\\left ( TP + FN \\right )}
FPR^{(n)} = \\frac{FP}{\\left ( FP + TN \\right )}
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
numpy array where columns represent appliances and rows represent: [TPR, FPR]
'''
tfpn = tp_fp_fn_tn(predicted_states, ground_truth_states)
tpr = tfpn[0, :] / (tfpn[0, :] + tfpn[2, :])
fpr = tfpn[1, :] / (tfpn[1, :] + tfpn[3, :])
return np.array([tpr, fpr])
def precision_recall(predicted_states, ground_truth_states):
'''Compute Precision and Recall
.. math::
Precision^{(n)} = \\frac{TP}{\\left ( TP + FP \\right )}
Recall^{(n)} = \\frac{TP}{\\left ( TP + FN \\right )}
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
numpy array where columns represent appliances and rows represent: [Precision, Recall]
'''
tfpn = tp_fp_fn_tn(predicted_states, ground_truth_states)
prec = tfpn[0, :] / (tfpn[0, :] + tfpn[1, :])
rec = tfpn[0, :] / (tfpn[0, :] + tfpn[2, :])
return np.array([prec, rec])
def hamming_loss(predicted_state, ground_truth_state):
'''Compute Hamming loss
.. math::
HammingLoss =
\\frac{1}{T} \\sum_{t}
\\frac{1}{N} \\sum_{n}
xor \\left ( x^{(n)}_t, \\hat{x}^{(n)}_t \\right )
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
float of hamming_loss
'''
num_appliances = np.size(ground_truth_state.values, axis=1)
xors = np.sum((predicted_state.values != ground_truth_state.values),
axis=1) / num_appliances
return np.mean(xors)
"""
| apache-2.0 |
bastibl/gnuradio | gr-dtv/examples/atsc_ctrlport_monitor.py | 7 | 6277 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
import matplotlib
matplotlib.use("QT4Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from gnuradio.ctrlport.GNURadioControlPortClient import (
GNURadioControlPortClient, TTransportException,
)
import numpy
from numpy.fft import fftpack
"""
If a host is running the ATSC receiver chain with ControlPort
turned on, this script will connect to the host using the hostname and
port pair of the ControlPort instance and display metrics of the
receiver. The ATSC publishes information about the success of the
Reed-Solomon decoder and Viterbi metrics for use here in displaying
the link quality. This also gets the equalizer taps of the receiver
and displays the frequency response.
"""
class atsc_ctrlport_monitor(object):
def __init__(self, host, port):
argv = [None, host, port]
radiosys = GNURadioControlPortClient(argv=argv, rpcmethod='thrift')
self.radio = radiosys.client
print(self.radio)
vt_init_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
data = self.radio.getKnobs([vt_init_key])[vt_init_key]
init_metric = numpy.mean(data.value)
self._viterbi_metric = 100*[init_metric,]
table_col_labels = ('Num Packets', 'Error Rate', 'Packet Error Rate',
'Viterbi Metric', 'SNR')
self._fig = plt.figure(1, figsize=(12,12), facecolor='w')
self._sp0 = self._fig.add_subplot(4,1,1)
self._sp1 = self._fig.add_subplot(4,1,2)
self._sp2 = self._fig.add_subplot(4,1,3)
self._plot_taps = self._sp0.plot([], [], 'k', linewidth=2)
self._plot_psd = self._sp1.plot([], [], 'k', linewidth=2)
self._plot_data = self._sp2.plot([], [], 'ok', linewidth=2, markersize=4, alpha=0.05)
self._ax2 = self._fig.add_subplot(4,1,4)
self._table = self._ax2.table(cellText=[len(table_col_labels)*['0']],
colLabels=table_col_labels,
loc='center')
self._ax2.axis('off')
cells = self._table.properties()['child_artists']
for c in cells:
            c.set_lw(0.1)  # set line width
c.set_ls('solid')
c.set_height(0.2)
ani = animation.FuncAnimation(self._fig, self.update_data, frames=200,
fargs=(self._plot_taps[0], self._plot_psd[0],
self._plot_data[0], self._table),
init_func=self.init_function,
blit=True)
plt.show()
def update_data(self, x, taps, psd, syms, table):
try:
eqdata_key = 'dtv_atsc_equalizer0::taps'
symdata_key = 'dtv_atsc_equalizer0::data'
rs_nump_key = 'dtv_atsc_rs_decoder0::num_packets'
rs_numbp_key = 'dtv_atsc_rs_decoder0::num_bad_packets'
rs_numerrs_key = 'dtv_atsc_rs_decoder0::num_errors_corrected'
vt_metrics_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
snr_key = 'probe2_f0::SNR'
data = self.radio.getKnobs([])
eqdata = data[eqdata_key]
symdata = data[symdata_key]
rs_num_packets = data[rs_nump_key]
rs_num_bad_packets = data[rs_numbp_key]
rs_num_errors_corrected = data[rs_numerrs_key]
vt_decoder_metrics = data[vt_metrics_key]
snr_est = data[snr_key]
vt_decoder_metrics = numpy.mean(vt_decoder_metrics.value)
self._viterbi_metric.pop()
self._viterbi_metric.insert(0, vt_decoder_metrics)
except TTransportException:
sys.stderr.write("Lost connection, exiting")
sys.exit(1)
ntaps = len(eqdata.value)
taps.set_ydata(eqdata.value)
taps.set_xdata(list(range(ntaps)))
self._sp0.set_xlim(0, ntaps)
self._sp0.set_ylim(min(eqdata.value), max(eqdata.value))
fs = 6.25e6
freq = numpy.linspace(-fs / 2, fs / 2, 10000)
H = numpy.fft.fftshift(fftpack.fft(eqdata.value, 10000))
HdB = 20.0*numpy.log10(abs(H))
psd.set_ydata(HdB)
psd.set_xdata(freq)
self._sp1.set_xlim(0, fs / 2)
self._sp1.set_ylim([min(HdB), max(HdB)])
self._sp1.set_yticks([min(HdB), max(HdB)])
self._sp1.set_yticklabels(["min", "max"])
nsyms = len(symdata.value)
syms.set_ydata(symdata.value)
syms.set_xdata(nsyms*[0,])
self._sp2.set_xlim([-1, 1])
self._sp2.set_ylim([-10, 10])
per = float(rs_num_bad_packets.value) / float(rs_num_packets.value)
ber = float(rs_num_errors_corrected.value) / float(187*rs_num_packets.value)
table._cells[(1,0)]._text.set_text("{0}".format(rs_num_packets.value))
table._cells[(1,1)]._text.set_text("{0:.2g}".format(ber))
table._cells[(1,2)]._text.set_text("{0:.2g}".format(per))
table._cells[(1,3)]._text.set_text("{0:.1f}".format(numpy.mean(self._viterbi_metric)))
table._cells[(1,4)]._text.set_text("{0:.4f}".format(snr_est.value[0]))
return (taps, psd, syms, table)
def init_function(self):
return self._plot_taps + self._plot_psd + self._plot_data
if __name__ == "__main__":
host = sys.argv[1]
port = sys.argv[2]
m = atsc_ctrlport_monitor(host, port)
| gpl-3.0 |
nberliner/SRVis | lib/dataHandler.py | 1 | 3758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
SRVis Copyright (C) 2015 Niklas Berliner
"""
import sys
import numpy as np
import tifffile as Tiff
from localisationClass import rapidstormLocalisations, XYTLocalisations
#from visualiseLocalisations import QuadTree
class dataHandler():
"""
Interface between the data and the SRVis application
The super-resolution data is stored in a pandas DataFrame and the TIFF image
is read using tifffile.py (see http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html)
"""
def __init__(self, fnameImage, fnameLocalisations, fnameLocalisationsType, pixelSize, CpPh):
self.fnameLocalisations = fnameLocalisations
self.fnameLocalisationsType = fnameLocalisationsType
self.pixelSize = pixelSize
self.CpPh = CpPh
        if fnameImage is None or fnameImage == '':
self.image = None
else:
print 'Reading the image'
self.image = Tiff.TiffFile(fnameImage)
print 'Reading the localisations'
self._loadLocalisations()
def _loadLocalisations(self):
# Here other localisation data types can be added if desired
if self.fnameLocalisationsType == 'rapidstorm':
self.data = rapidstormLocalisations()
self.data.readFile(self.fnameLocalisations, photonConversion=self.CpPh, pixelSize=self.pixelSize)
elif self.fnameLocalisationsType == 'xyt':
self.data = XYTLocalisations()
self.data.readFile(self.fnameLocalisations, pixelSize=self.pixelSize)
else:
print 'No localisation type is checked. Something went wrong..exiting'
sys.exit() # Very ugly! Should be changed to a popup!
def reloadData(self, dataType):
if dataType == 'localisations':
self._loadLocalisations()
def getImage(self, frame):
""" Returns the frame as np.array """
return self.image[frame].asarray()
def maxImageFrame(self):
""" Returns the number of frames """
        if self.image is None:
return 0
else:
return len(self.image)
def getLocalisations(self, frame):
""" Return X and Y localisation data as numpy arrays that can be
directly used in a matplotlib scatter plot """
data = self.data.localisations()
xy = np.asarray(data[ data['frame'] == frame ][['x','y']])
return xy[:,0], xy[:,1]
def filterData(self, filterValues):
""" Filter the localisation data based on the filter conditions in
filterValues.
filterValues must be a dict with dataType that should be filtered as
keys and the min and max values as values, i.e. e.g.
filterValues = dict()
filterValues['SNR'] = (20, None)
"""
self.data.filterAll(filterValues, relative=False)
def saveLocalisations(self, fname, pxSize):
""" Save the (filtered) localisations to disk """
self.data.writeToFile(fname, pixelSize=pxSize)
| gpl-3.0 |
wesm/arrow | python/pyarrow/tests/test_schema.py | 4 | 20872 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import pickle
import sys
import weakref
import pytest
import numpy as np
import pyarrow as pa
import pyarrow.tests.util as test_util
from pyarrow.vendored.version import Version
def test_schema_constructor_errors():
msg = ("Do not call Schema's constructor directly, use `pyarrow.schema` "
"instead")
with pytest.raises(TypeError, match=msg):
pa.Schema()
def test_type_integers():
dtypes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64']
for name in dtypes:
factory = getattr(pa, name)
t = factory()
assert str(t) == name
def test_type_to_pandas_dtype():
M8_ns = np.dtype('datetime64[ns]')
cases = [
(pa.null(), np.object_),
(pa.bool_(), np.bool_),
(pa.int8(), np.int8),
(pa.int16(), np.int16),
(pa.int32(), np.int32),
(pa.int64(), np.int64),
(pa.uint8(), np.uint8),
(pa.uint16(), np.uint16),
(pa.uint32(), np.uint32),
(pa.uint64(), np.uint64),
(pa.float16(), np.float16),
(pa.float32(), np.float32),
(pa.float64(), np.float64),
(pa.date32(), M8_ns),
(pa.date64(), M8_ns),
(pa.timestamp('ms'), M8_ns),
(pa.binary(), np.object_),
(pa.binary(12), np.object_),
(pa.string(), np.object_),
(pa.list_(pa.int8()), np.object_),
# (pa.list_(pa.int8(), 2), np.object_), # TODO needs pandas conversion
(pa.map_(pa.int64(), pa.float64()), np.object_),
]
for arrow_type, numpy_type in cases:
assert arrow_type.to_pandas_dtype() == numpy_type
@pytest.mark.pandas
def test_type_to_pandas_dtype_check_import():
# ARROW-7980
test_util.invoke_script('arrow_7980.py')
def test_type_list():
value_type = pa.int32()
list_type = pa.list_(value_type)
assert str(list_type) == 'list<item: int32>'
field = pa.field('my_item', pa.string())
l2 = pa.list_(field)
assert str(l2) == 'list<my_item: string>'
def test_type_comparisons():
val = pa.int32()
assert val == pa.int32()
assert val == 'int32'
assert val != 5
def test_type_for_alias():
cases = [
('i1', pa.int8()),
('int8', pa.int8()),
('i2', pa.int16()),
('int16', pa.int16()),
('i4', pa.int32()),
('int32', pa.int32()),
('i8', pa.int64()),
('int64', pa.int64()),
('u1', pa.uint8()),
('uint8', pa.uint8()),
('u2', pa.uint16()),
('uint16', pa.uint16()),
('u4', pa.uint32()),
('uint32', pa.uint32()),
('u8', pa.uint64()),
('uint64', pa.uint64()),
('f4', pa.float32()),
('float32', pa.float32()),
('f8', pa.float64()),
('float64', pa.float64()),
('date32', pa.date32()),
('date64', pa.date64()),
('string', pa.string()),
('str', pa.string()),
('binary', pa.binary()),
('time32[s]', pa.time32('s')),
('time32[ms]', pa.time32('ms')),
('time64[us]', pa.time64('us')),
('time64[ns]', pa.time64('ns')),
('timestamp[s]', pa.timestamp('s')),
('timestamp[ms]', pa.timestamp('ms')),
('timestamp[us]', pa.timestamp('us')),
('timestamp[ns]', pa.timestamp('ns')),
('duration[s]', pa.duration('s')),
('duration[ms]', pa.duration('ms')),
('duration[us]', pa.duration('us')),
('duration[ns]', pa.duration('ns')),
]
for val, expected in cases:
assert pa.type_for_alias(val) == expected
def test_type_string():
t = pa.string()
assert str(t) == 'string'
def test_type_timestamp_with_tz():
tz = 'America/Los_Angeles'
t = pa.timestamp('ns', tz=tz)
assert t.unit == 'ns'
assert t.tz == tz
def test_time_types():
t1 = pa.time32('s')
t2 = pa.time32('ms')
t3 = pa.time64('us')
t4 = pa.time64('ns')
assert t1.unit == 's'
assert t2.unit == 'ms'
assert t3.unit == 'us'
assert t4.unit == 'ns'
assert str(t1) == 'time32[s]'
assert str(t4) == 'time64[ns]'
with pytest.raises(ValueError):
pa.time32('us')
with pytest.raises(ValueError):
pa.time64('s')
def test_from_numpy_dtype():
cases = [
(np.dtype('bool'), pa.bool_()),
(np.dtype('int8'), pa.int8()),
(np.dtype('int16'), pa.int16()),
(np.dtype('int32'), pa.int32()),
(np.dtype('int64'), pa.int64()),
(np.dtype('uint8'), pa.uint8()),
(np.dtype('uint16'), pa.uint16()),
(np.dtype('uint32'), pa.uint32()),
(np.dtype('float16'), pa.float16()),
(np.dtype('float32'), pa.float32()),
(np.dtype('float64'), pa.float64()),
(np.dtype('U'), pa.string()),
(np.dtype('S'), pa.binary()),
(np.dtype('datetime64[s]'), pa.timestamp('s')),
(np.dtype('datetime64[ms]'), pa.timestamp('ms')),
(np.dtype('datetime64[us]'), pa.timestamp('us')),
(np.dtype('datetime64[ns]'), pa.timestamp('ns')),
(np.dtype('timedelta64[s]'), pa.duration('s')),
(np.dtype('timedelta64[ms]'), pa.duration('ms')),
(np.dtype('timedelta64[us]'), pa.duration('us')),
(np.dtype('timedelta64[ns]'), pa.duration('ns')),
]
for dt, pt in cases:
result = pa.from_numpy_dtype(dt)
assert result == pt
# Things convertible to numpy dtypes work
assert pa.from_numpy_dtype('U') == pa.string()
assert pa.from_numpy_dtype(np.str_) == pa.string()
assert pa.from_numpy_dtype('int32') == pa.int32()
assert pa.from_numpy_dtype(bool) == pa.bool_()
with pytest.raises(NotImplementedError):
pa.from_numpy_dtype(np.dtype('O'))
with pytest.raises(TypeError):
pa.from_numpy_dtype('not_convertible_to_dtype')
def test_schema():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'baz']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert sch[0].name == 'foo'
assert sch[0].type == fields[0].type
assert sch.field('foo').name == 'foo'
assert sch.field('foo').type == fields[0].type
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
child 0, item: int8"""
with pytest.raises(TypeError):
pa.schema([None])
def test_schema_weakref():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
schema = pa.schema(fields)
wr = weakref.ref(schema)
assert wr() is not None
del schema
assert wr() is None
def test_schema_to_string_with_metadata():
lorem = """\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla accumsan vel
turpis et mollis. Aliquam tincidunt arcu id tortor blandit blandit. Donec
eget leo quis lectus scelerisque varius. Class aptent taciti sociosqu ad
litora torquent per conubia nostra, per inceptos himenaeos. Praesent
faucibus, diam eu volutpat iaculis, tellus est porta ligula, a efficitur
turpis nulla facilisis quam. Aliquam vitae lorem erat. Proin a dolor ac libero
dignissim mollis vitae eu mauris. Quisque posuere tellus vitae massa
pellentesque sagittis. Aenean feugiat, diam ac dignissim fermentum, lorem
sapien commodo massa, vel volutpat orci nisi eu justo. Nulla non blandit
sapien. Quisque pretium vestibulum urna eu vehicula."""
# ARROW-7063
my_schema = pa.schema([pa.field("foo", "int32", False,
metadata={"key1": "value1"}),
pa.field("bar", "string", True,
metadata={"key3": "value3"})],
metadata={"lorem": lorem})
assert my_schema.to_string() == """\
foo: int32 not null
-- field metadata --
key1: 'value1'
bar: string
-- field metadata --
key3: 'value3'
-- schema metadata --
lorem: '""" + lorem[:65] + "' + " + str(len(lorem) - 65)
# Metadata that exactly fits
result = pa.schema([('f0', 'int32')],
metadata={'key': 'value' + 'x' * 62}).to_string()
assert result == """\
f0: int32
-- schema metadata --
key: 'valuexxxxxxxxxxxxxxxxxxxxxxxxxxxxx\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'"""
assert my_schema.to_string(truncate_metadata=False) == """\
foo: int32 not null
-- field metadata --
key1: 'value1'
bar: string
-- field metadata --
key3: 'value3'
-- schema metadata --
lorem: '{}'""".format(lorem)
assert my_schema.to_string(truncate_metadata=False,
show_field_metadata=False) == """\
foo: int32 not null
bar: string
-- schema metadata --
lorem: '{}'""".format(lorem)
assert my_schema.to_string(truncate_metadata=False,
show_schema_metadata=False) == """\
foo: int32 not null
-- field metadata --
key1: 'value1'
bar: string
-- field metadata --
key3: 'value3'"""
assert my_schema.to_string(truncate_metadata=False,
show_field_metadata=False,
show_schema_metadata=False) == """\
foo: int32 not null
bar: string"""
def test_schema_from_tuples():
fields = [
('foo', pa.int32()),
('bar', pa.string()),
('baz', pa.list_(pa.int8())),
]
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'baz']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
child 0, item: int8"""
with pytest.raises(TypeError):
pa.schema([('foo', None)])
def test_schema_from_mapping():
fields = OrderedDict([
('foo', pa.int32()),
('bar', pa.string()),
('baz', pa.list_(pa.int8())),
])
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'baz']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
child 0, item: int8"""
fields = OrderedDict([('foo', None)])
with pytest.raises(TypeError):
pa.schema(fields)
def test_schema_duplicate_fields():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('foo', pa.list_(pa.int8())),
]
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'foo']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert repr(sch) == """\
foo: int32
bar: string
foo: list<item: int8>
child 0, item: int8"""
assert sch[0].name == 'foo'
assert sch[0].type == fields[0].type
with pytest.warns(FutureWarning):
assert sch.field_by_name('bar') == fields[1]
with pytest.warns(FutureWarning):
assert sch.field_by_name('xxx') is None
with pytest.warns((UserWarning, FutureWarning)):
assert sch.field_by_name('foo') is None
# Schema::GetFieldIndex
assert sch.get_field_index('foo') == -1
# Schema::GetAllFieldIndices
assert sch.get_all_field_indices('foo') == [0, 2]
def test_field_flatten():
f0 = pa.field('foo', pa.int32()).with_metadata({b'foo': b'bar'})
assert f0.flatten() == [f0]
f1 = pa.field('bar', pa.float64(), nullable=False)
ff = pa.field('ff', pa.struct([f0, f1]), nullable=False)
assert ff.flatten() == [
pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}),
pa.field('ff.bar', pa.float64(), nullable=False)] # XXX
# Nullable parent makes flattened child nullable
ff = pa.field('ff', pa.struct([f0, f1]))
assert ff.flatten() == [
pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}),
pa.field('ff.bar', pa.float64())]
fff = pa.field('fff', pa.struct([ff]))
assert fff.flatten() == [pa.field('fff.ff', pa.struct([f0, f1]))]
def test_schema_add_remove_metadata():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
s1 = pa.schema(fields)
assert s1.metadata is None
metadata = {b'foo': b'bar', b'pandas': b'badger'}
s2 = s1.with_metadata(metadata)
assert s2.metadata == metadata
s3 = s2.remove_metadata()
assert s3.metadata is None
# idempotent
s4 = s3.remove_metadata()
assert s4.metadata is None
def test_schema_equals():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
metadata = {b'foo': b'bar', b'pandas': b'badger'}
sch1 = pa.schema(fields)
sch2 = pa.schema(fields)
sch3 = pa.schema(fields, metadata=metadata)
sch4 = pa.schema(fields, metadata=metadata)
assert sch1.equals(sch2, check_metadata=True)
assert sch3.equals(sch4, check_metadata=True)
assert sch1.equals(sch3)
assert not sch1.equals(sch3, check_metadata=True)
del fields[-1]
sch3 = pa.schema(fields)
assert not sch1.equals(sch3)
def test_schema_equals_propagates_check_metadata():
# ARROW-4088
schema1 = pa.schema([
pa.field('foo', pa.int32()),
pa.field('bar', pa.string())
])
schema2 = pa.schema([
pa.field('foo', pa.int32()),
pa.field('bar', pa.string(), metadata={'a': 'alpha'}),
])
assert not schema1.equals(schema2, check_metadata=True)
assert schema1.equals(schema2)
def test_schema_equals_invalid_type():
# ARROW-5873
schema = pa.schema([pa.field("a", pa.int64())])
for val in [None, 'string', pa.array([1, 2])]:
with pytest.raises(TypeError):
schema.equals(val)
def test_schema_equality_operators():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
metadata = {b'foo': b'bar', b'pandas': b'badger'}
sch1 = pa.schema(fields)
sch2 = pa.schema(fields)
sch3 = pa.schema(fields, metadata=metadata)
sch4 = pa.schema(fields, metadata=metadata)
assert sch1 == sch2
assert sch3 == sch4
# __eq__ and __ne__ do not check metadata
assert sch1 == sch3
assert not sch1 != sch3
assert sch2 == sch4
# comparison with other types doesn't raise
assert sch1 != []
assert sch3 != 'foo'
def test_schema_get_fields():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
schema = pa.schema(fields)
assert schema.field('foo').name == 'foo'
assert schema.field(0).name == 'foo'
assert schema.field(-1).name == 'baz'
with pytest.raises(KeyError):
schema.field('other')
with pytest.raises(TypeError):
schema.field(0.0)
with pytest.raises(IndexError):
schema.field(4)
def test_schema_negative_indexing():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
schema = pa.schema(fields)
assert schema[-1].equals(schema[2])
assert schema[-2].equals(schema[1])
assert schema[-3].equals(schema[0])
with pytest.raises(IndexError):
schema[-4]
with pytest.raises(IndexError):
schema[3]
def test_schema_repr_with_dictionaries():
fields = [
pa.field('one', pa.dictionary(pa.int16(), pa.string())),
pa.field('two', pa.int32())
]
sch = pa.schema(fields)
expected = (
"""\
one: dictionary<values=string, indices=int16, ordered=0>
two: int32""")
assert repr(sch) == expected
def test_type_schema_pickling():
cases = [
pa.int8(),
pa.string(),
pa.binary(),
pa.binary(10),
pa.list_(pa.string()),
pa.map_(pa.string(), pa.int8()),
pa.struct([
pa.field('a', 'int8'),
pa.field('b', 'string')
]),
pa.union([
pa.field('a', pa.int8()),
pa.field('b', pa.int16())
], pa.lib.UnionMode_SPARSE),
pa.union([
pa.field('a', pa.int8()),
pa.field('b', pa.int16())
], pa.lib.UnionMode_DENSE),
pa.time32('s'),
pa.time64('us'),
pa.date32(),
pa.date64(),
pa.timestamp('ms'),
pa.timestamp('ns'),
pa.decimal128(12, 2),
pa.decimal256(76, 38),
pa.field('a', 'string', metadata={b'foo': b'bar'})
]
for val in cases:
roundtripped = pickle.loads(pickle.dumps(val))
assert val == roundtripped
fields = []
for i, f in enumerate(cases):
if isinstance(f, pa.Field):
fields.append(f)
else:
fields.append(pa.field('_f{}'.format(i), f))
schema = pa.schema(fields, metadata={b'foo': b'bar'})
roundtripped = pickle.loads(pickle.dumps(schema))
assert schema == roundtripped
def test_empty_table():
schema1 = pa.schema([
pa.field('f0', pa.int64()),
pa.field('f1', pa.dictionary(pa.int32(), pa.string())),
pa.field('f2', pa.list_(pa.list_(pa.int64()))),
])
# test it preserves field nullability
schema2 = pa.schema([
pa.field('a', pa.int64(), nullable=False),
pa.field('b', pa.int64())
])
for schema in [schema1, schema2]:
table = schema.empty_table()
assert isinstance(table, pa.Table)
assert table.num_rows == 0
assert table.schema == schema
@pytest.mark.pandas
def test_schema_from_pandas():
import pandas as pd
inputs = [
list(range(10)),
pd.Categorical(list(range(10))),
['foo', 'bar', None, 'baz', 'qux'],
np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'
], dtype='datetime64[ns]'),
]
if Version(pd.__version__) >= Version('1.0.0'):
inputs.append(pd.array([1, 2, None], dtype=pd.Int32Dtype()))
for data in inputs:
df = pd.DataFrame({'a': data})
schema = pa.Schema.from_pandas(df)
expected = pa.Table.from_pandas(df).schema
assert schema == expected
def test_schema_sizeof():
schema = pa.schema([
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
])
assert sys.getsizeof(schema) > 30
schema2 = schema.with_metadata({"key": "some metadata"})
assert sys.getsizeof(schema2) > sys.getsizeof(schema)
schema3 = schema.with_metadata({"key": "some more metadata"})
assert sys.getsizeof(schema3) > sys.getsizeof(schema2)
def test_schema_merge():
a = pa.schema([
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
])
b = pa.schema([
pa.field('foo', pa.int32()),
pa.field('qux', pa.bool_())
])
c = pa.schema([
pa.field('quux', pa.dictionary(pa.int32(), pa.string()))
])
d = pa.schema([
pa.field('foo', pa.int64()),
pa.field('qux', pa.bool_())
])
result = pa.unify_schemas([a, b, c])
expected = pa.schema([
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8())),
pa.field('qux', pa.bool_()),
pa.field('quux', pa.dictionary(pa.int32(), pa.string()))
])
assert result.equals(expected)
with pytest.raises(pa.ArrowInvalid):
pa.unify_schemas([b, d])
def test_undecodable_metadata():
# ARROW-10214: undecodable metadata shouldn't fail repr()
data1 = b'abcdef\xff\x00'
data2 = b'ghijkl\xff\x00'
schema = pa.schema(
[pa.field('ints', pa.int16(), metadata={'key': data1})],
metadata={'key': data2})
assert 'abcdef' in str(schema)
assert 'ghijkl' in str(schema)
| apache-2.0 |
linhvannguyen/PhDworks | codes/isotropic/regression/regressionUtils.py | 2 | 10304 | """
Created on Aug 02 2016
@author: Linh Van Nguyen ([email protected])
"""
import numpy as np
from netCDF4 import Dataset
def data_preprocess(sspacing, tspacing):
"""
    Load coupled low-resolution (LR) and high-resolution (HR) input-output data
    from file and normalize each feature to zero mean and unit standard deviation
Parameters
----------
sspacing : 2D subsampling ratio in space (in one direction)
tspacing : 1D subsampling ratio in time
"""
# Constants
Nh = 96
Nt = 37
# Position of measurements in space-time
HTLS_sknots = np.arange(0,Nh,sspacing)
LTHS_tknots = np.arange(0,Nh,tspacing)
Nl = len(HTLS_sknots)
Ns = len(LTHS_tknots)
# Dimension of HTLS and LTHS
P = Nh*Nh
Q = Nl*Nl
M = Nt*Ns
#Load all training data
Xh_tr = np.zeros((M, P))
Xl_tr = np.zeros((M, Q))
ncfile1 = Dataset('/data/ISOTROPIC/data/data_downsampled4.nc','r')
for t in range(Nt):
count = 0
for i in LTHS_tknots:
xh = np.array(ncfile1.variables['velocity_x'][t,0:Nh,0:Nh,i])
xl = xh[0:-1:sspacing,0:-1:sspacing] # xh[np.meshgrid(HTLS_sknots,HTLS_sknots)]
Xh_tr[t*Ns + count,:] = np.reshape(xh,(1, P))
Xl_tr[t*Ns + count,:] = np.reshape(xl,(1, Q))
count = count + 1
ncfile1.close()
# normalized: centered, variance 1
mea_l = np.zeros(Q)
sig_l = np.zeros(Q)
for k in range(Q):
mea_l[k] = np.mean(Xl_tr[:,k])
sig_l[k] = np.std(Xl_tr[:,k])
Xl_tr[:,k] = (Xl_tr[:,k]-mea_l[k])/sig_l[k]
mea_h = np.zeros(P)
sig_h = np.zeros(P)
for k in range(P):
mea_h[k] = np.mean(Xh_tr[:,k])
sig_h[k] = np.std(Xh_tr[:,k])
Xh_tr[:,k] = (Xh_tr[:,k]-mea_h[k])/sig_h[k]
return (Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h)
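# Illustrative sketch (helper name hypothetical, not part of the original
# pipeline): each column k returned by data_preprocess() was scaled as
# (x - mea[k]) / sig[k], so a prediction made in the normalized space has to
# be mapped back with the inverse transform, exactly as RR_allfields() does
# further below.
def _denormalize_example(x_normalized, mea, sig):
    """Invert the per-column zero-mean / unit-std scaling of data_preprocess."""
    return np.multiply(x_normalized, sig) + mea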
####################### RIDGE REGRESSION ######################################
def RR_cv_estimate_alpha(sspacing, tspacing, alphas):
"""
Estimate the optimal regularization parameter using grid search from a list
and via k-fold cross validation
Parameters
----------
sspacing : 2D subsampling ratio in space (in one direction)
tspacing : 1D subsampling ratio in time
alphas : list of regularization parameters to do grid search
"""
#Load all training data
(Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) = data_preprocess(sspacing, tspacing)
# RidgeCV
from sklearn.linear_model import RidgeCV
ridge = RidgeCV(alphas = alphas, cv = 10, fit_intercept=False, normalize=False)
ridge.fit(Xl_tr, Xh_tr)
RR_alpha_opt = ridge.alpha_
print('\n Optimal lambda:', RR_alpha_opt)
# save to .mat file
import scipy.io as io
filename = "".join(['/data/PhDworks/isotropic/regerssion/RR_cv_alpha_sspacing',
str(sspacing),'_tspacing',str(tspacing),'.mat'])
io.savemat(filename, dict(alphas=alphas, RR_alpha_opt=RR_alpha_opt))
# return
return RR_alpha_opt
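# Minimal self-contained sketch of the same alpha grid search on synthetic
# data, so RidgeCV behaviour can be checked without the netCDF inputs; the
# function name, shapes and alpha grid below are arbitrary choices made for
# illustration only.
def _ridgecv_synthetic_example():
    from sklearn.linear_model import RidgeCV
    rng = np.random.RandomState(0)
    X = rng.randn(200, 50)                    # 200 samples, 50 LR features
    W = rng.randn(50, 10)                     # true linear map
    Y = X.dot(W) + 0.1 * rng.randn(200, 10)   # 10 HR targets with noise
    ridge = RidgeCV(alphas=np.logspace(-3, 3, 13), fit_intercept=False)
    ridge.fit(X, Y)
    return ridge.alpha_                       # selected regularization strength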
def RR_allfields(sspacing, tspacing, RR_alpha_opt):
"""
Reconstruct all fields using RR and save to netcdf file
Parameters
----------
sspacing : 2D subsampling ratio in space (in one direction)
tspacing : 1D subsampling ratio in time
RR_alpha_opt : optimal regularization parameter given from RR_cv_estimate_alpha(sspacing, tspacing, alphas)
"""
# Constants
Nh = 96
Nt = 37
#Load all training data
(Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) = data_preprocess(sspacing, tspacing)
# Ridge Regression
from sklearn.linear_model import Ridge
ridge = Ridge(alpha=RR_alpha_opt, fit_intercept=False, normalize=False)
ridge.fit(Xl_tr, Xh_tr)
print np.shape(ridge.coef_)
# Prediction and save to file
filename = "".join(['/data/PhDworks/isotropic/regerssion/RR_sspacing',
str(sspacing),'_tspacing',str(tspacing),'.nc'])
import os
try:
os.remove(filename)
except OSError:
pass
ncfile2 = Dataset(filename, 'w')
ncfile1 = Dataset('/data/PhDworks/isotropic/refdata_downsampled4.nc','r')
# create the dimensions
ncfile2.createDimension('Nt',Nt)
ncfile2.createDimension('Nz',Nh)
ncfile2.createDimension('Ny',Nh)
ncfile2.createDimension('Nx',Nh)
# create the var and its attribute
var = ncfile2.createVariable('Urec', 'd',('Nt','Nz','Ny','Nx'))
for t in range(Nt):
print('3D snapshot:',t)
for i in range(Nh):
xl = np.array(ncfile1.variables['velocity_x'][t,0:Nh:sspacing,0:Nh:sspacing,i]) # load only LR
xl = np.divide(np.reshape(xl,(1, xl.size)) - mea_l, sig_l) #pre-normalize
xrec = np.multiply(ridge.predict(xl), sig_h) + mea_h # re-normalize the prediction
var[t,:,:,i] = np.reshape(xrec, (Nh,Nh)) # put to netcdf file
# Close file
ncfile1.close()
ncfile2.close()
def RR_validationcurve(sspacing, tspacing, RR_lambda_opt, lambdas_range):
"""
    Compute the validation curve (training and test MSE over a range of
    regularization parameters) for Ridge Regression and save it to a .mat file
    Parameters
    ----------
    sspacing : 2D subsampling ratio in space (in one direction)
    tspacing : 1D subsampling ratio in time
    RR_lambda_opt : optimal regularization parameter from RR_cv_estimate_alpha(sspacing, tspacing, alphas)
    lambdas_range : list of regularization parameters to evaluate
"""
# lambdas_range= np.logspace(-2, 4, 28)
#Load all training data
(Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) = data_preprocess(sspacing, tspacing)
# validation curve
from sklearn.linear_model import Ridge
from sklearn.learning_curve import validation_curve
train_MSE, test_MSE = validation_curve(Ridge(),Xl_tr, Xh_tr, param_name="alpha", param_range=lambdas_range,
scoring = "mean_squared_error", cv=10)
    # scikit-learn scoring always maximizes, so the returned MSE values carry a flipped sign
train_MSE = -train_MSE
test_MSE = -test_MSE
# save to .mat file
import scipy.io as sio
sio.savemat('/data/PhDworks/isotropic/regerssion/RR_crossvalidation.mat',
dict(lambdas_range=lambdas_range, train_MSE = train_MSE, test_MSE = test_MSE))
return (train_MSE, test_MSE)
def RR_learningcurve(sspacing, tspacing, RR_lambda_opt, train_sizes):
# train_sizes=np.linspace(.1, 1.0, 20)
#Load all training data
(Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) = data_preprocess(sspacing, tspacing)
# Learning curve
from sklearn.linear_model import Ridge
from sklearn.learning_curve import learning_curve
from sklearn import cross_validation
estimator = Ridge(alpha=RR_lambda_opt, fit_intercept=False, normalize=False)
cv = cross_validation.ShuffleSplit(np.shape(Xl_tr)[0], n_iter=50, test_size=0.1, random_state=0)
train_sizes, train_MSE, test_MSE = learning_curve(estimator, Xl_tr, Xh_tr,
cv=cv, n_jobs=4,
train_sizes = train_sizes,
scoring = "mean_squared_error")
# save to .mat file
import scipy.io as sio
sio.savemat('/data/PhDworks/isotropic/regerssion/RR_learningcurve.mat',
dict(train_sizes=train_sizes, train_MSE = -train_MSE, test_MSE = -test_MSE))
####################### OTHER FUNCTIONS #######################################
def plot_learning_curve(estimator, plt, X, y, ylim=None, cv=None, n_jobs=1,
train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and traning learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
plt : current matplotlib plot
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Number of training examples")
plt.ylabel("Score")
from sklearn.learning_curve import learning_curve
train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv,
n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")
plt.grid()
plt.legend(loc="best")
return plt
def interp2 (x, y, z, xnew, ynew, kind='cubic'):
from scipy import interpolate
f = interpolate.interp2d(x, y, z, kind=kind)
return f(xnew, ynew)
def NRMSE (xref, xrec):
err = np.sqrt(np.sum(np.square(xref.ravel()-xrec.ravel())))/np.sqrt(np.sum(np.square(xref.ravel())))
return err
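# Quick illustrative check of the NRMSE definition above (not in the original
# module): a reconstruction equal to the reference gives 0, and an all-zero
# reconstruction gives 1, since the error norm then equals the reference norm.
def _nrmse_sanity_example():
    xref = np.array([1.0, 2.0, 3.0])
    assert NRMSE(xref, xref) == 0.0
    assert np.isclose(NRMSE(xref, np.zeros_like(xref)), 1.0)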
| mit |
kalkun/segmentor | preprocessing.py | 1 | 18286 | """
Example run
```
python3 preprocessing.py
```
"""
from PIL import Image
from scipy import ndimage
from skimage.filters import rank
from skimage.morphology import square
from skimage.morphology import disk
from skimage.morphology import white_tophat
import numpy
import cv2
import matplotlib.pyplot as plt
import PIL
from unbuffered import Unbuffered
import sys
# make print() not wait on the same buffer as the
# def it exists in:
sys.stdout = Unbuffered(sys.stdout)
class Preprocess:
"""
    The Preprocess class is responsible for all preprocessing. It is built
    for easy chaining of the preprocessing operations, so that
    operations may be easily followed by each other in any order by dotting
them out like so:
```
obj = Preprocess(
image="./STARE/im0255.ppm"
).meanFilter(
).show(
).greyOpening(
).show()
```
Notice how `show()` can be called after any operation. `show()` uses the
PIL Image debugger to show the image.
    The implemented methods are generally limited to the methods described in
Marin et al ITM 2011. However some methods allow for different
parameters to be used in the operation where the ones described in Marin
et al ITM 2011 are merely defaults.
To run the methods described in Marin et al 2011 in the same order as
described then the method `process` can be used:
```
obj = Preprocess(
image="./STARE/im0003.ppm"
).process(
).show(
).save(
path="./im0003_processed.png"
)
```
Non standard requesites for running are:
- scipy https://www.scipy.org/
- cv2 http://opencv-python-tutroals.readthedocs.io/en/latest/
- skimage http://scikit-image.org/
@class Preprocess
@param image {string} The path to the image to be preprocessed.
@param maskTh {int} The threshold value to create the mask from
@property source {string} Image source
@property image {PIL obj} PIL Image object
@property mask {numpy array} The mask matrix which is 0 in the area
outside FOV and 1's inside FOV
    @property threshold {int} The threshold value from which the mask is
        built. Pixels with intensity below the threshold are considered
        outside the FOV, and inside otherwise.
"""
def __init__(self, image, maskTh=50):
self.initialized = False
self.__printStatus(
"Initialize preprocessing for: " + image,
isEnd=True,
initial=True
)
self.source = image
self.name = image.split("/")[-1].split(".")[0]
self.image = Image.open(image)
self.loaded = self.image.load()
# self.threshold=50
self.threshold = maskTh
self.extractColorBands()
self.mask = numpy.uint8(
numpy.greater(
self.red_array,
self.threshold
).astype(int)
)
def save(self, path, array=numpy.empty(0), useMask=False, rotate=True):
"""
Saves the image array as png at the desired path.
@method save
@param path {string} the path where the image will be saved.
@param array {numpy array} The array which the image is made from,
default is self.image_array
        @param useMask {Bool} Whether to reset non-FOV pixels using the mask.
Default is False
"""
if not array.any():
array = self.image_array
if useMask:
array = array * self.mask
        self._arrayToImage(array, rotate=rotate).save(path, "png")
self.__printStatus("saving to " + path + "...")
self.__printStatus("[done]", True)
return self
def _arrayToImage(self, array=numpy.empty(0), rotate=True):
"""
@private
@method arrayToImage
@param array {numpy array} array which is converted to an image
@param rotate {Bool} If true the image is transposed and rotated to
counter the numpy conversion of arrays.
"""
self.__printStatus("array to image...")
if not array.any():
array = self.image_array
img = Image.fromarray(numpy.uint8(array))
self.__printStatus("[done]", True)
if rotate:
return img.transpose(Image.FLIP_TOP_BOTTOM).rotate(-90)
else:
return img
def show(
self,
array=numpy.empty(0),
rotate=True,
invert=False,
useMask=False,
mark=None
):
"""
@method show
@param array {numpy array} image array to be shown.
        @param rotate {Bool} Whether to rotate to counter numpy's array
conversion, default True.
@param invert {Bool} Invert the image, default False.
@param useMask {Bool} Reset non FOV pixels using the mask, default
is False.
"""
        if not array.any():
            array = self.image_array
        if useMask:
            array = array * self.mask
        im = self._arrayToImage(array, rotate=rotate)
        self.__printStatus("show image...")
if mark:
im = im.convert("RGB")
pixels = im.load()
x, y = mark
for i in range(x-1, x+1):
for j in range(y-1, y+1):
# color an area around the mark
# blue, for easilier visibility
pixels[i, j] = (0, 0, 255)
if invert:
Image.eval(im, lambda x:255-x).show()
else:
print("#####", im.mode, "#####")
im.show()
self.__printStatus("[done]", True)
return self
def extractColorBands(self):
"""
        Extracts the red and green channel arrays from the original image and
        uses the green channel as the working greyscale image (self.image_array).
@method extractColorBands
"""
self.__printStatus("Extract color bands...")
green_array = numpy.empty([self.image.size[0], self.image.size[1]], int)
red_array = numpy.empty([self.image.size[0], self.image.size[1]], int)
for x in range(self.image.size[0]):
for y in range(self.image.size[1]):
red_array[x,y] = self.loaded[x,y][0]
green_array[x,y] = self.loaded[x,y][1]
self.green_array = green_array
self.red_array = red_array
self.image_array = self.green_array
self.__printStatus("[done]", True)
return self
def greyOpening(self, array=numpy.empty(0)):
"""
Makes a 3x3 morphological grey opening
@method greyOpening
@param array {numpy array} array to operate on.
"""
self.__printStatus("Grey opening...")
if not array.any():
array = self.image_array
self.grey_opened = ndimage.morphology.grey_opening(array, [3,3])
self.image_array = self.grey_opened * self.mask
self.__printStatus("[done]", True)
return self
def meanFilter(self, m=3, array=numpy.empty(0)):
"""
Mean filtering, replaces the intensity value, by the average
intensity of a pixels neighbours including itself.
m is the size of the filter, default is 3x3
@method meanFilter
@param m {int} The width and height of the m x m filtering matrix,
default is 3.
@param array {numpy array} the array which the operation is carried
out on.
"""
self.__printStatus("Mean filtering " + str(m) + "x" + str(m) + "...")
if not array.any():
array = self.image_array
if array.dtype not in ["uint8", "uint16"]:
array = numpy.uint8(array)
mean3x3filter = rank.mean(array, square(m), mask=self.mask)
self.image_array = mean3x3filter * self.mask
self.__printStatus("[done]", True)
return self
def gaussianFilter(self, array=numpy.empty(0), sigma=1.8, m=9):
"""
@method gaussianFilter
@param array {numpy array} the array the operation is carried out
on, default is the image_array.
@param sigma {Float} The value of sigma to be used with the gaussian
filter operation
@param m {int} The size of the m x m matrix to filter with.
"""
self.__printStatus(
"Gaussian filter sigma=" + str(sigma) + ", m=" + str(m) + "..."
)
if not array.any():
array = self.image_array
self.image_array = cv2.GaussianBlur(array, (m,m), sigma) * self.mask
self.__printStatus("[done]", True)
return self
def _getBackground(self, array=numpy.empty(0), threshold=None):
"""
_getBackground returns an image unbiased at the edge of the FOV
@method _getBackground
@param array {numpy array} the array the operation is carried out
on, default is the image_array.
@param threshold {int} Threshold that is used to compute a
background image, default is self.threshold.
"""
if not array.any():
array = self.red_array
if not threshold:
threshold = self.threshold
saved_image_array = self.image_array
background = self.meanFilter(m=69).image_array
self.__printStatus("Get background image...")
# reset self.image_array
self.image_array = saved_image_array
for x in range(len(background)):
for y in range(len(background[0])):
if array[x,y] > threshold:
if x-35 > 0:
x_start = x-35
else:
x_start = 0
if x+34 < len(background):
x_end = x+34
else:
x_end = len(background) -1
if y-35 > 0:
y_start = y-35
else:
y_start = 0
if y+35 < len(background[0]):
y_end = y+35
else:
y_end = len(background[0]) -1
# 1 is added to the right and bottom boundary because of
# pythons way of indexing
x_end += 1
y_end += 1
# mask is is the same subMatrix but taken from the original
# image array
mask = array[x_start:x_end, y_start:y_end]
# indexes of the non fov images
nonFOVs = numpy.less(mask, threshold)
# indexes of FOVs
FOVs = numpy.greater(mask, threshold)
# subMat is a 69x69 matrix with x,y as center
subMat = background[x_start:x_end, y_start:y_end]
# subMat must be a copy in order to not allocate values into
# background directly
subMat = numpy.array(subMat, copy=True)
subMat[nonFOVs] = subMat[FOVs].mean()
# finding every element less than 10 from the original image
# and using this as indices on the background subMatrix
# is used to calculate the average from the 'remaining
# pixels in the square'
background[x,y] = subMat.mean()
self.__printStatus("[done]", True)
return background
def subtractBackground(self, array=numpy.empty(0)):
"""
@method subtractBackground
@param array {numpy array} the array the operation is carried out
on, default is the image_array.
"""
if not array.any():
array = self.image_array
background = self._getBackground() * self.mask
self.__printStatus("Subtract background...")
self.image_array = numpy.subtract(
numpy.int16(array),
numpy.int16(background)
) * self.mask
self.__printStatus("[done]", True)
return self
def linearTransform(self, array=numpy.empty(0)):
"""
        Shade correction: linearly maps the (background-subtracted) image
        onto the 8-bit grayscale range [0-255]
from: http://stackoverflow.com/a/1969274/2853237
@method linearTransform
@param array {numpy array} the array the operation is carried out
on, default is the image_array.
"""
self.__printStatus("Linear transforming...")
if not array.any():
array = self.image_array
# Figure out how 'wide' each range is
leftSpan = array.max() - array.min()
rightSpan = 255
array = ((array - array.min()) / leftSpan) * rightSpan
self.image_array = array * self.mask
self.__printStatus("[done]", True)
return self
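    # Illustrative helper (name hypothetical, not used by the original
    # pipeline): the same min-max rescaling as linearTransform() applied to a
    # plain array, so the mapping onto the 8-bit range [0, 255] can be checked
    # in isolation.
    @staticmethod
    def _linear_rescale_example(array):
        span = array.max() - array.min()
        if span == 0:
            # constant image: nothing to stretch
            return numpy.zeros_like(array, dtype=float)
        return ((array - array.min()) / float(span)) * 255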
def transformIntensity(self, array=numpy.empty(0)):
"""
@method transformIntensity
@param array {numpy array} the array the operation is carried out
on, default is the image_array.
"""
self.__printStatus("Scale intensity levels...")
if not array.any():
array = self.image_array
counts = numpy.bincount(array.astype(int).flat)
ginput_max = numpy.argmax(counts)
for x in range(len(array)):
for y in range(len(array[0])):
                array[x,y] = array[x,y] + 128 - ginput_max
if array[x,y] < 0:
array[x,y] = 0
elif array[x,y] > 255:
array[x,y] = 255
s = str(ginput_max)
self.image_array = array * self.mask
self.__printStatus("[done]", True)
return self
def vesselEnhance(self, array=numpy.empty(0)):
"""
@method vesselEnhance
@param array {numpy array} the array the operation is carried out
on, default is the image_array.
"""
self.__printStatus("Vessel enhancement...");
if not array.any():
array = self.image_array
# disk shaped mask with radius 8
disk_shape = disk(8)
# the complimentary image is saved to hc:
array = numpy.uint8(array)
hc = 255 - array
# Top Hat transform
# https://en.wikipedia.org/wiki/Top-hat_transform
# White top hat is defined as the difference between
# the opened image and the original image.
# in this case the starting image is the complimentary image `hc`
self.image_array = white_tophat(hc, selem=disk_shape) * self.mask
self.__printStatus("[done]", True)
return self
def __printStatus(self, status, isEnd=False, initial=False):
"""
@private
@method __printStatus
@param status {string}
        @param isEnd {Bool} Whether to end with a newline or not, default is
            False.
        @param initial {Bool} Whether this is the first status message to be
            printed, default False.
"""
if not initial and not isEnd:
status = "\t" + status
if initial:
status = "\n" + status
if isEnd:
delim="\n"
else:
delim=""
# set tabs so status length is 48
tabs = ((48 - len(status)) // 8) * "\t"
status += tabs
print(status, end=delim, sep="")
def process(self, enhance=True, onlyEnhance=False):
"""
        `process` runs the preprocessing pipeline described in
        Marin et al ITM [2011].
        The article works with two types of preprocessed images.
        The first is the image obtained by chaining all operations
        except for `vesselEnhance`, denoted the homogenized image. The
        second is the vessel enhanced image, which is obtained by applying the
        vessel enhancement operation to the homogenized image.
        This method supports both images. If `enhance` is False then
        self.image_array will hold the homogenized image, and afterwards the
vessel enhanced image can be computed without starting over by
setting `onlyEnhance` to True. So to compute both images one at a
time one could call:
```
obj = Preprocess(
"./im0075.ppm"
)
.process(
enhance=False
).show(
).process(
onlyEnhance=True
).show()
```
@method process
        @param enhance {Bool} Whether to also apply the vessel enhancement
            operation or not, default True.
        @param onlyEnhance {Bool} Whether to only do the vessel enhancement
            operation, default False.
"""
if not onlyEnhance:
self.greyOpening()
self.meanFilter()
self.gaussianFilter()
self.subtractBackground()
self.linearTransform()
self.transformIntensity()
if enhance or onlyEnhance:
self.vesselEnhance()
# returns the object where
# all described preprocess has taken place
# available on self.feature_array or self.show(), self.save(<path>)
return self | bsd-3-clause |
sebastien-forestier/pydmps | pydmps/dmp.py | 3 | 7418 | '''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
from cs import CanonicalSystem
class DMPs(object):
"""Implementation of Dynamic Motor Primitives,
as described in Dr. Stefan Schaal's (2002) paper."""
def __init__(self, dmps, bfs, dt=.01,
y0=0, goal=1, w=None,
ay=None, by=None, **kwargs):
"""
dmps int: number of dynamic motor primitives
bfs int: number of basis functions per DMP
dt float: timestep for simulation
y0 list: initial state of DMPs
goal list: goal state of DMPs
w list: tunable parameters, control amplitude of basis functions
ay int: gain on attractor term y dynamics
        by int: damping gain on attractor term y dynamics (typically ay/4 for critical damping)
"""
self.dmps = dmps
self.bfs = bfs
self.dt = dt
if isinstance(y0, (int, float)):
y0 = np.ones(self.dmps)*y0
self.y0 = y0
if isinstance(goal, (int, float)):
goal = np.ones(self.dmps)*goal
self.goal = goal
if w is None:
# default is f = 0
w = np.zeros((self.dmps, self.bfs))
self.w = w
if ay is None: ay = np.ones(dmps) * 25. # Schaal 2012
self.ay = ay
if by is None: by = self.ay.copy() / 4. # Schaal 2012
self.by = by
# set up the CS
self.cs = CanonicalSystem(dt=self.dt, **kwargs)
self.timesteps = int(self.cs.run_time / self.dt)
# set up the DMP system
self.reset_state()
def check_offset(self):
"""Check to see if initial position and goal are the same
if they are, offset slightly so that the forcing term is not 0"""
for d in range(self.dmps):
if (self.y0[d] == self.goal[d]):
self.goal[d] += 1e-4
def gen_front_term(self, x, dmp_num): raise NotImplementedError()
def gen_goal(self, y_des): raise NotImplementedError()
def gen_psi(self): raise NotImplementedError()
def gen_weights(self, f_target): raise NotImplementedError()
def imitate_path(self, y_des):
"""Takes in a desired trajectory and generates the set of
system parameters that best realize this path.
y_des list/array: the desired trajectories of each DMP
should be shaped [dmps, run_time]
"""
# set initial state and goal
if y_des.ndim == 1:
y_des = y_des.reshape(1,len(y_des))
self.y0 = y_des[:,0].copy()
self.y_des = y_des.copy()
self.goal = self.gen_goal(y_des)
self.check_offset()
if not (self.timesteps == y_des.shape[1]):
# generate function to interpolate the desired trajectory
import scipy.interpolate
path = np.zeros((self.dmps, self.timesteps))
x = np.linspace(0, self.cs.run_time, y_des.shape[1])
for d in range(self.dmps):
path_gen = scipy.interpolate.interp1d(x, y_des[d])
for t in range(self.timesteps):
path[d, t] = path_gen(t * self.dt)
y_des = path
# calculate velocity of y_des
dy_des = np.diff(y_des) / self.dt
# add zero to the beginning of every row
dy_des = np.hstack((np.zeros((self.dmps, 1)), dy_des))
# calculate acceleration of y_des
ddy_des = np.diff(dy_des) / self.dt
# add zero to the beginning of every row
ddy_des = np.hstack((np.zeros((self.dmps, 1)), ddy_des))
f_target = np.zeros((y_des.shape[1], self.dmps))
# find the force required to move along this trajectory
for d in range(self.dmps):
f_target[:,d] = ddy_des[d] - self.ay[d] * \
(self.by[d] * (self.goal[d] - y_des[d]) - \
dy_des[d])
# efficiently generate weights to realize f_target
self.gen_weights(f_target)
'''# plot the basis function activations
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(211)
plt.plot(psi_track)
plt.title('psi_track')
# plot the desired forcing function vs approx
plt.subplot(212)
plt.plot(f_target[:,0])
plt.plot(np.sum(psi_track * self.w[0], axis=1))
plt.legend(['f_target', 'w*psi'])
plt.tight_layout()
plt.show()'''
self.reset_state()
return y_des
def rollout(self, timesteps=None, **kwargs):
"""Generate a system trial, no feedback is incorporated."""
self.reset_state()
if timesteps is None:
            if 'tau' in kwargs:
timesteps = int(self.timesteps / kwargs['tau'])
else:
timesteps = self.timesteps
# set up tracking vectors
y_track = np.zeros((timesteps, self.dmps))
dy_track = np.zeros((timesteps, self.dmps))
ddy_track = np.zeros((timesteps, self.dmps))
for t in range(timesteps):
y, dy, ddy = self.step(**kwargs)
# record timestep
y_track[t] = y
dy_track[t] = dy
ddy_track[t] = ddy
return y_track, dy_track, ddy_track
def reset_state(self):
"""Reset the system state"""
self.y = self.y0.copy()
self.dy = np.zeros(self.dmps)
self.ddy = np.zeros(self.dmps)
self.cs.reset_state()
def step(self, tau=1.0, state_fb=None):
"""Run the DMP system for a single timestep.
tau float: scales the timestep
increase tau to make the system execute faster
state_fb np.array: optional system feedback
"""
# run canonical system
cs_args = {'tau':tau,
'error_coupling':1.0}
if state_fb is not None:
# take the 2 norm of the overall error
state_fb = state_fb.reshape(1,self.dmps)
dist = np.sqrt(np.sum((state_fb - self.y)**2))
cs_args['error_coupling'] = 1.0 / (1.0 + 10*dist)
x = self.cs.step(**cs_args)
# generate basis function activation
psi = self.gen_psi(x)
for d in range(self.dmps):
# generate the forcing term
f = self.gen_front_term(x, d) * \
(np.dot(psi, self.w[d])) / np.sum(psi) if self.bfs > 0. else 0.
# DMP acceleration
self.ddy[d] = (self.ay[d] *
(self.by[d] * (self.goal[d] - self.y[d]) - \
self.dy[d]/tau) + f) * tau
self.dy[d] += self.ddy[d] * tau * self.dt * cs_args['error_coupling']
self.y[d] += self.dy[d] * self.dt * cs_args['error_coupling']
return self.y, self.dy, self.ddy
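# Illustrative sketch, not part of the original pydmps file: a minimal concrete
# subclass filling in the abstract hooks with Gaussian basis functions on the
# canonical variable x and a locally weighted regression in gen_weights, enough
# to run imitate_path() and rollout() end to end.  The class name, the centre /
# width choices, and the assumption that CanonicalSystem.step() and
# reset_state() can be called with default arguments (as DMPs.step above does
# with explicit ones) are choices made for this sketch; the package's own
# discrete implementation may differ.
class DMPsDiscreteSketch(DMPs):
    def __init__(self, *args, **kwargs):
        super(DMPsDiscreteSketch, self).__init__(*args, **kwargs)
        # spread basis function centres over the range of x (x decays 1 -> 0)
        self.c = np.linspace(0.01, 1.0, self.bfs)
        self.h = np.ones(self.bfs) * self.bfs**1.5 / self.c
    def gen_front_term(self, x, dmp_num):
        # scale the forcing term by x and by the movement amplitude
        return x * (self.goal[dmp_num] - self.y0[dmp_num])
    def gen_goal(self, y_des):
        # goal of each DMP is the final point of the demonstrated trajectory
        return np.copy(y_des[:, -1])
    def gen_psi(self, x):
        # Gaussian basis activations at canonical state x
        return np.exp(-self.h * (x - self.c)**2)
    def gen_weights(self, f_target):
        # regenerate the canonical trajectory by stepping the canonical system
        self.cs.reset_state()
        x_track = np.array([self.cs.step() for _ in range(self.timesteps)])
        psi_track = np.array([self.gen_psi(x) for x in x_track])
        # weighted linear regression, per DMP and per basis function
        for d in range(self.dmps):
            s = x_track * (self.goal[d] - self.y0[d])
            for b in range(self.bfs):
                num = np.sum(s * psi_track[:, b] * f_target[:, d])
                den = np.sum(s**2 * psi_track[:, b])
                self.w[d, b] = num / den if den != 0 else 0.0
        self.cs.reset_state()
# Possible use of the sketch (kept as a comment so importing this module stays
# side-effect free):
#   dmp = DMPsDiscreteSketch(dmps=1, bfs=50)
#   dmp.imitate_path(y_des=np.sin(np.linspace(0, np.pi / 2, 100)))
#   y_track, dy_track, ddy_track = dmp.rollout()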
| gpl-3.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/api/patch_collection.py | 3 | 1229 | import matplotlib
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
import pylab
fig=pylab.figure()
ax=fig.add_subplot(111)
resolution = 50 # the number of vertices
N = 3
x = pylab.rand(N)
y = pylab.rand(N)
radii = 0.1*pylab.rand(N)
patches = []
for x1,y1,r in zip(x, y, radii):
circle = Circle((x1,y1), r)
patches.append(circle)
x = pylab.rand(N)
y = pylab.rand(N)
radii = 0.1*pylab.rand(N)
theta1 = 360.0*pylab.rand(N)
theta2 = 360.0*pylab.rand(N)
for x1,y1,r,t1,t2 in zip(x, y, radii, theta1, theta2):
wedge = Wedge((x1,y1), r, t1, t2)
patches.append(wedge)
# Some limiting conditions on Wedge
patches += [
Wedge((.3,.7), .1, 0, 360), # Full circle
Wedge((.7,.8), .2, 0, 360, width=0.05), # Full ring
Wedge((.8,.3), .2, 0, 45), # Full sector
Wedge((.8,.3), .2, 45, 90, width=0.10), # Ring sector
]
for i in range(N):
polygon = Polygon(pylab.rand(N,2), True)
patches.append(polygon)
colors = 100*pylab.rand(len(patches))
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
p.set_array(pylab.array(colors))
ax.add_collection(p)
pylab.colorbar(p)
pylab.show()
| gpl-2.0 |
VahidGh/ChannelWorm | channelworm/fitter/examples/EGL-36.py | 4 | 7717 | """
Example of using cwFitter to generate a HH model for EGL-36 ion channel
Based on experimental data from doi:10.1016/S0896-6273(00)80355-4
"""
import os.path
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../..')
from channelworm.fitter import *
if __name__ == '__main__':
cwd=os.getcwd()
path = cwd+'/egl-36-data/boltzmannFit/'
if not os.path.exists(path):
os.makedirs(path)
pov_id = 11
vc_id = 12
args = {'weight':{'start':1,'peak':1,'tail':1,'end':1}}
sampleData = {}
myInitiator = initiators.Initiator()
print 'Sample Data:'
sampleData['POV'] = myInitiator.get_graphdata_from_db(pov_id,plot=False)
print 'POV'
sampleData['VClamp'] = myInitiator.get_graphdata_from_db(vc_id, plot=False)
print 'VClamp'
scale = False
bio_params = myInitiator.get_bio_params()
sim_params = myInitiator.get_sim_params()
myEvaluator = evaluators.Evaluator(sampleData,sim_params,bio_params,scale=scale,args=args)
print 'Scale: %s'%scale
print 'args:'
print args
# bio parameters for EGL-36
bio_params['cell_type'] = 'Xenopus oocytes'
bio_params['channel_type'] = 'EGL-36'
bio_params['ion_type'] = 'K'
bio_params['val_cell_params'][0] = 200e-9 # C_mem DOI: 10.1074/jbc.M605814200
bio_params['val_cell_params'][1] = 20e-6 # area DOI: 10.1101/pdb.top066308
bio_params['gate_params'] = {'vda': {'power': 1},'cd': {'power': 1}}
print 'Gate_params:'
print bio_params['gate_params']
bio_params['channel_params'] = ['g_dens','e_rev']
bio_params['unit_chan_params'] = ['S/m2','V']
bio_params['min_val_channel'] = [1 , -150e-3]
bio_params['max_val_channel'] = [10, 150e-3]
bio_params['channel_params'].extend(['v_half_a','k_a','T_a'])
bio_params['unit_chan_params'].extend(['V','V','s'])
bio_params['min_val_channel'].extend([-0.15, 0.001, 0.001])
bio_params['max_val_channel'].extend([ 0.15, 0.1, 1])
# # #Parameters for Ca-dependent inactivation (Boyle & Cohen 2008)
bio_params['channel_params'].extend(['ca_half','alpha_ca','k_ca','T_ca'])
bio_params['unit_chan_params'].extend(['M','','M','s'])
bio_params['min_val_channel'].extend([1e-10,0.1, -1e-6, 1e-4])
bio_params['max_val_channel'].extend([1e-6 , 1 , -1e-9, 1])
# TODO: Separate simulator protocols from plot
# Simulation parameters for EGL-36 VClamp and POV
sim_params['v_hold'] = -90e-3
sim_params['I_init'] = 0
sim_params['pc_type'] = 'VClamp'
sim_params['deltat'] = 1e-4
sim_params['duration'] = 1.2
sim_params['start_time'] = 0.045
sim_params['end_time'] = 1.055
sim_params['protocol_start'] = -90e-3
sim_params['protocol_end'] = 90e-3
sim_params['protocol_steps'] = 10e-3
sim_params['ca_con'] = 1e-6
print 'Sim_params:'
print sim_params
# opt = '-pso'
# opt = '-ga'
# opt = 'leastsq'
opt = None
print 'Optimization method:'
print opt
if len(sys.argv) == 2:
opt = sys.argv[1]
start = time.time()
if opt == '-ga':
opt_args = myInitiator.get_opt_params()
opt_args['max_evaluations'] = 300
opt_args['population_size'] = 600
# opt_args['verbose'] = False
best_candidate, score = myEvaluator.ga_evaluate(min=bio_params['min_val_channel'],
max=bio_params['max_val_channel'],
args=opt_args)
elif opt == '-pso':
opt_args = myInitiator.get_opt_params(type='PSO')
opt_args['minstep'] = 1e-18
opt_args['minfunc'] = 1e-18
opt_args['swarmsize'] = 500
opt_args['maxiter'] = 100
opt_args['POV_dist'] = 4e-4
best_candidate, score = myEvaluator.pso_evaluate(lb=bio_params['min_val_channel'],
ub=bio_params['max_val_channel'],
args=opt_args)
else:
opt_args = {}
# vda,cd *******
best_candidate = [ 3.00231776e+00, -9.00073633e-02, 6.02673501e-02, 1.95933741e-02,
2.53990016e-02, 1.00000000e-9, 8.18616232e-01, -3.29244576e-08,
2.42556384e-01] # 7.85842303587e-15
if opt_args:
print 'Optimization parameters:'
print opt_args
if opt == 'leastsq':
# best_candidate = np.asarray(bio_params['min_val_channel']) + np.asarray(bio_params['max_val_channel']) / 2
best_candidate_params = dict(zip(bio_params['channel_params'],best_candidate))
cell_var = dict(zip(bio_params['cell_params'],bio_params['val_cell_params']))
# sim_params['protocol_start'] = 10e-3
# sim_params['protocol_end'] = 70e-3
# vcSim = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'],act_fit=True)
vcSim = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
vcEval = evaluators.Evaluator(sampleData,sim_params,bio_params,scale=scale)
# args['weight'] = {'POV':10}
args['weight'] = {}
args['ftol'] = 1e-14
# args['xtol'] = 1e-14
# args['full_output'] = 1
result = vcSim.vclamp_leastsq(params= bio_params['channel_params'],
best_candidate= best_candidate,
sampleData=sampleData,args=args)
print 'Optimized using Scipy leastsq:'
print result
print 'Full output:'
print result
print 'leastsq Parameters:'
print args
best_candidate = result
if 'POV' in sampleData:
POV_fit_cost = vcEval.pov_cost(result)
print 'POV cost:'
print POV_fit_cost
VClamp_fit_cost = vcEval.vclamp_cost(result)
print 'VClamp cost:'
print VClamp_fit_cost
secs = time.time()-start
print("----------------------------------------------------\n\n"
+"Ran in %f seconds (%f mins)\n"%(secs, secs/60.0))
best_candidate_params = dict(zip(bio_params['channel_params'],best_candidate))
cell_var = dict(zip(bio_params['cell_params'],bio_params['val_cell_params']))
print 'best candidate after optimization:'
print best_candidate_params
    # mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'],act_fit=True)
mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
bestSim = mySimulator.patch_clamp()
#
myModelator = modelators.Modelator(bio_params,sim_params)
myModelator.compare_plots(sampleData,bestSim,show=True, path=path)
myModelator.patch_clamp_plots(bestSim,show=True, path=path)
# # Decreasing voltage steps for pretty gating plots
sim_params['protocol_steps'] = 1e-3
# # sim_params['deltat'] = 1e-5
mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
bestSim = mySimulator.patch_clamp()
#
myModelator = modelators.Modelator(bio_params,sim_params)
myModelator.gating_plots(bestSim, show=True, path=path)
# Generate NeuroML2 file
contributors = [{'name': 'Vahid Ghayoomi','email': '[email protected]'}]
model_params = myInitiator.get_modeldata_from_db(fig_id=vc_id,model_id=3,contributors=contributors,file_path=path)
print model_params
nml2_file = myModelator.generate_channel_nml2(bio_params,best_candidate_params,model_params)
run_nml_out = myModelator.run_nml2(model_params['file_name'])
| mit |
sdvillal/manysources | manysources/analyses/substructures.py | 1 | 31188 | from collections import defaultdict
import os.path as op
from itertools import izip
import warnings
import cPickle as pickle
import glob
import math
import os
import h5py
import pandas as pd
import numpy as np
from rdkit import Chem
import matplotlib.pyplot as plt
from manysources import MANYSOURCES_ROOT
from manysources.datasets import ManysourcesDataset, MANYSOURCES_MOLECULES
from manysources.hub import Hub
warnings.simplefilter("error")
PROBLEMATIC_EXPIDS = {'bcrp':[489, 1840, 2705, 2780, 3842], 'hERG':[]}
def substructs_weights_one_source(source, model='logreg3', feats='ecfps1', dset='bcrp', num_expids=4096):
"""
Given a source, what are the weights of all the substructures when this source is in train / in test for LSO for
all requested expids. We now use the Hub. For the cases where the source is in train, it happens many times per
expid so we take the average.
"""
importances_source_in_lso = []
expids = tuple(range(num_expids))
hub = Hub(dset_id=dset, lso=True, model=model, feats=feats, expids=expids)
source_coocs = hub.scoocs()
# indices (expids, fold id) of source in test
indices_in_test = source_coocs[source_coocs[source]].index
indices_in_test = [(expid, foldnum) for (expid, foldnum) in indices_in_test if expid not in PROBLEMATIC_EXPIDS[dset]]
# indices (expids, fold ids) of source in train
indices_in_train = source_coocs[source_coocs[source]==False].index
# transform it into a dictionary of {expids:[foldnums]}
indices_in_train_dict = defaultdict(list)
for expid, foldnum in indices_in_train:
if expid not in PROBLEMATIC_EXPIDS[dset]:
indices_in_train_dict[expid].append(foldnum)
# get corresponding weights
weights,_, expids, foldnums = hub.logreg_models()
rows_out = [row for row, (expid, foldnum) in enumerate(izip(expids, foldnums))
if (expid, foldnum) in indices_in_test]
weights_in_test = weights[rows_out, :].todense()
# For train, we get several foldnums per expids and we want to average those weights
for expid_in in indices_in_train_dict.keys():
rows = [row for row, (expid, fold) in enumerate(izip(expids, foldnums)) if expid == expid_in
and fold in indices_in_train_dict[expid_in]]
w = weights[rows, :]
w = np.squeeze(np.asarray(w.tocsc().mean(axis=0)))
importances_source_in_lso.append(w)
return indices_in_train_dict.keys(), np.array(importances_source_in_lso), np.asarray(weights_in_test)
def proportion_relevant_features(source, dset='bcrp', model='logreg3', feats='ecfps1'):
"""
Here, "relevant" refears to having a non null weight in the models.
"""
sums_in = []
sums_out = []
expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats, dset=dset)
for weights_in, weights_out in zip(all_weights_in, all_weights_out):
sums_in.append(np.sum(weights_in != 0))
sums_out.append(np.sum(weights_out != 0))
return np.mean(np.array(sums_in)), np.mean(np.array(sums_out))
def most_changing_substructs_source(dset='bcrp', model='logreg3', feats='ecfps1', source='Imai_2004', top=10):
"""
Returns a dictionary of {substruct:[changes in weight]} for all the expids in which the substructure was among the
top most changing in terms of logistic weights (comparing weights when the source is in training or in test)
"""
if not op.exists(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'top_%i_substructures_changing_weight_%s_%s_%s.dict'
%(top, source, model, feats))):
substruct_changes_dict = defaultdict(list)
expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats, dset=dset)
i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
# dimensions: expids * num substructures
# get the absolute weight difference between source in and source out
difference_weights = np.absolute(np.subtract(all_weights_in, all_weights_out))
orders = np.argsort(difference_weights, axis=1) # for each expid, indices of the sorted weight differences
# Let's take top n differences
for expid, o_i in enumerate(orders[:,-top:]): # because the argsort puts first the smallest weight differences!
great_substructs = [i2s[i] for i in o_i]
corresponding_weights = difference_weights[expid][o_i]
for i, sub in enumerate(great_substructs):
substruct_changes_dict[sub].append(corresponding_weights[i])
# substruct_changes_dict now contains the changes of weight obtained for all the expid in which the substruct was
# among the top n changing substructs.
with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'top_%i_substructures_changing_weight_%s_%s_%s.dict'
%(top, source, model, feats)), 'wb') as writer:
pickle.dump(substruct_changes_dict, writer, protocol=pickle.HIGHEST_PROTOCOL)
return substruct_changes_dict
else:
with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'top_%i_substructures_changing_weight_%s_%s_%s.dict'
%(top, source, model, feats)), 'rb') as reader:
return pickle.load(reader)
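# Small self-contained illustration (function name hypothetical) of the
# argsort trick used above: argsort is ascending, so the last `top` indices
# correspond to the largest values (here, the largest weight differences).
def _example_top_k_by_argsort():
    values = np.array([0.2, 1.5, 0.1, 0.7])
    order = np.argsort(values)   # ascending: [2, 0, 3, 1]
    return order[-2:]            # indices of the two largest values: [3, 1]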
def substructures_change_weight(source, model='logreg3', feats='ecfps1', dset='bcrp', non_zero_diff=0.01):
"""
    Given a source, retrieve the substructures present in the source that on average change weight when this source
    is in train / in test in LSO. Also returns the occurrences of these substructures in the 2 classes (inhibitor /
    non-inhibitor).
"""
_, weights_ins, weights_outs = substructs_weights_one_source(source, model=model, feats=feats, dset=dset)
# average over all expids:
weights_in = np.array(weights_ins).mean(axis=0)
# average over all expids
weights_out = np.array(weights_outs).mean(axis=0)
i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
difference_weights = np.array(weights_in - weights_out)
order = np.argsort(difference_weights)
ordered_diff_w = difference_weights[order]
ordered_substr = i2s[order]
print '%i substructures have their weights decreased when the source %s is in external set (LSO)' \
%(len(ordered_substr[ordered_diff_w > non_zero_diff]), source)
print '%i substructures have their weights increased when the source %s is in external set (LSO)' \
%(len(ordered_substr[ordered_diff_w < - non_zero_diff]), source)
# Retrieve occurrence in source for each of those substructures with non-zero difference
subs_dict = defaultdict(list)
# 1. Decreased weight
for subs, weight_diff in zip(ordered_substr[ordered_diff_w > non_zero_diff], ordered_diff_w[ordered_diff_w > non_zero_diff]):
n1, n2 = substructure_appears_in_source(subs, source, dataset_nickname=dset)
if (n1, n2) != (0,0):
subs_dict[subs].append((n1, n2))
subs_dict[subs].append(weight_diff)
# 2. Increased weight
for subs, weight_diff in zip(ordered_substr[ordered_diff_w < -non_zero_diff], ordered_diff_w[ordered_diff_w < - non_zero_diff]):
n1, n2 = substructure_appears_in_source(subs, source, dataset_nickname=dset)
if (n1, n2) != (0,0):
subs_dict[subs].append((n1, n2))
subs_dict[subs].append(weight_diff)
return subs_dict
def substructure_appears_in_source(substr, source, dataset_nickname='bcrp'):
"""
Returns a tuple (int, int) counting how many compounds of the given source contain the given substructure.
In position 0 of the tuple, we report the number of inhibitors and in position 1 we report the number of inactives.
"""
rdkimold = MANYSOURCES_MOLECULES[dataset_nickname]()
in_class_1 = 0
in_class_0 = 0
molid2mol = {}
molids = rdkimold.molids()
molid2src = {}
molid2act = {}
for molid in molids:
molid2mol[molid] = rdkimold.molid2mol(molid)
molid2src[molid] = rdkimold.molid2source(molid)
molid2act[molid] = rdkimold.molid2label(molid)
src2molids = defaultdict(list)
for molid, src in molid2src.iteritems():
src2molids[src].append(molid)
# How many molecules contain this substructure in class 1, how many in class 0
for molid in src2molids[source]:
molec = molid2mol[molid]
patt = Chem.MolFromSmarts(substr)
act = molid2act[molid]
if molec.HasSubstructMatch(patt):
if act == 'INHIBITOR': # Careful, maybe this does not work for all datasets!!
in_class_1 += 1
else:
in_class_0 += 1
return in_class_1, in_class_0
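# Tiny self-contained illustration (function name hypothetical, not part of
# the original analysis) of the RDKit calls used above: a SMARTS pattern is
# matched against a molecule parsed from SMILES; both strings are arbitrary.
def _example_substructure_match():
    mol = Chem.MolFromSmiles('c1ccccc1O')   # phenol
    patt = Chem.MolFromSmarts('c1ccccc1')   # aromatic six-membered ring
    return mol.HasSubstructMatch(patt)      # True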
def faster_relevant_feats(source, dset='bcrp'):
"""
    Compute the indices of the features (substructures) that occur in at least one molecule of the given source,
    working directly on the sparse feature matrix.
"""
X, _ = ManysourcesDataset(dset).ecfpsXY(no_dupes=True) # the sparse matrix of the features
all_molids = list(ManysourcesDataset(dset).ecfps(no_dupes=True).molids)
molids_in_source = ManysourcesDataset(dset).molecules().source2molids(source)
mol_indices = np.array([all_molids.index(molid) for molid in molids_in_source])
Xsource = X[mol_indices, :]
features_indices = set(Xsource.indices)
return features_indices
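# Minimal sketch (names hypothetical) of the trick used in
# faster_relevant_feats: for a CSR matrix, slicing the rows of interest and
# reading .indices directly yields the columns (features) with at least one
# non-zero entry, without densifying the matrix.
def _example_nonzero_feature_columns():
    from scipy.sparse import csr_matrix
    X = csr_matrix(np.array([[0, 1, 0, 2],
                             [0, 0, 0, 3],
                             [4, 0, 0, 0]]))
    rows_of_interest = np.array([0, 1])
    return set(X[rows_of_interest, :].indices)   # {1, 3}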
def plot_smarts(smarts, directory):
from integration import smartsviewer_utils
if len(smarts) > 1: # let's remove the C and c...
svr = smartsviewer_utils.SmartsViewerRunner(w=200, h=200)
svr.depict(smarts, op.join(directory, smarts + '.png'))
return op.join(directory, smarts + '.png')
def positive_negative_substructs(model='logreg3', feats='ecfps1', dset='bcrp', lso=True, num_expids=4096,
top_interesting=20):
'''
Given a dataset, collect all weights for all substructures across all expids, then average them and check the
extremes: positive weights mean a substructure that is likely to occur in inhibitors, negative weights mean
substructures more likely to occur in non-inhibitors. Are we learning something?
'''
hub = Hub(dset_id=dset, expids=num_expids, lso=lso, model=model, feats=feats)
weights, _, expids, foldnums = hub.logreg_models()
average_weights = np.asarray(weights.mean(axis=0))[0]
i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
order = np.argsort(average_weights)
ordered_substructures = i2s[order]
ordered_importances = average_weights[order]
top_inactives = zip(ordered_importances[0:top_interesting], ordered_substructures[0:top_interesting])
top_inhibitors = zip(ordered_importances[-top_interesting:], ordered_substructures[-top_interesting:])
# Let's plot them!
from PIL import Image
for weight, substr in top_inactives:
plot_smarts(substr, '/home/flo/Desktop')
ims = [Image.open(f) for f in glob.glob(op.join('/home/flo/Desktop', '*.png'))]
num_lines = math.ceil(float(len(ims))/4)
blank_image = Image.new("RGB", (800, int(num_lines*200)), color='white')
for i, im in enumerate(ims):
im.thumbnail((200,200), Image.ANTIALIAS)
blank_image.paste(im, (200 * (i%4), 200 * (i/4)))
blank_image.save(op.join(MANYSOURCES_ROOT, 'data', 'results', dset,
'substructs_max_negative_weights_lso.png'))
for f in glob.glob(op.join('/home/flo/Desktop', '*.png')):
os.remove(f)
for weight, substr in top_inhibitors:
plot_smarts(substr, '/home/flo/Desktop')
ims = [Image.open(f) for f in glob.glob(op.join('/home/flo/Desktop', '*.png'))]
num_lines = math.ceil(float(len(ims))/4)
blank_image = Image.new("RGB", (800, int(num_lines*200)), color='white')
for i, im in enumerate(ims):
im.thumbnail((200,200), Image.ANTIALIAS)
blank_image.paste(im, (200 * (i%4), 200 * (i/4)))
blank_image.save(op.join(MANYSOURCES_ROOT, 'data', 'results', dset,
'substructs_max_positive_weights_lso.png'))
for f in glob.glob(op.join('/home/flo/Desktop', '*.png')):
os.remove(f)
return top_inactives, top_inhibitors
print positive_negative_substructs()
exit(33)
"""
What can we do with this information?
1. compare rankings: are the most weighted substructures (in absolute value) still highly weighted when source x is
in the test set?
2. check the change in weight for each source across all the expids and do a t-test to check which ones are
significant
3. which substructures are present in the source? Do they show significant change in weight?
4. were those substructures actually important (high absolute value of weight) when the source was part of the
training set?
9. Can we correlate the change of weight (second whiskerplot with only substructures occurring in source) with a worse
prediction of the source? (in terms of average loss for all mols for all splits where the source is in external set)
Comments from Santi:
5. How hard would it be to adapt your code to find out a ranking of features according to how much they "changed
weight" between "source in" and "source out"? That would maybe allow us to highlight concrete features.
6. How hard would it be to restrict this same plot to only subsets of relevant features (substructures) for each source.
And by relevant I mean "substructures that actually appear in the source". For large sources, I would not expect a big
change (because most substructures will be relevant for the source). But for not-so-big sources, I would expect this
to change the plot dramatically. I think so because I believe that important changes in substructures get hidden by the
overwhelming majority of features that do not care about whether a source is available for training or not.
7. Can you imagine a way of labelling folds according to whether a molecule was better or worse predicted than average?
8. Can you imagine that performing regression on the weight of a feature we find interesting, using the co-occurrences
as predictors, would somehow allow us to explain which co-occurrences make a feature important/unimportant? (A minimal
sketch of this idea follows right after this block.)
"""
def test_ranking(weights_in, weights_out):
import scipy.stats as stats
return stats.spearmanr(weights_in, weights_out) # returns r and the p-value associated
def overall_ranking(source, model='logreg3', feats='ecfps1', dset='bcrp'):
"""
    Creates a dictionary and pickles it. Does not return anything. For each expid, computes the Spearman correlation
    coefficient between the weights in and the weights out (across all substructures).
"""
if op.exists(op.join(MANYSOURCES_ROOT, 'data', 'results', dset,
'spearmans_lso_' + source + '_' + model + '_' + feats + '.dict')):
return
spearmans = {}
expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats,
dset=dset)
print len(expids), len(all_weights_in), len(all_weights_out)
for expid, weights_in, weights_out in zip(expids, all_weights_in, all_weights_out):
spearmanr, pvalue = test_ranking(weights_in, weights_out)
spearmans[expid] = (spearmanr, pvalue)
print expid, spearmanr, pvalue
with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'spearmans_lso_' + source + '_' + model + '_' + feats + '.dict'), 'wb') as writer:
pickle.dump(spearmans, writer, protocol=pickle.HIGHEST_PROTOCOL)
def ranking_relevant_features(source, model='logreg3', feats='ecfps1', dset='bcrp'):
"""
Same as before but by "relevant features", we mean features that actually occur in the given source
"""
if op.exists(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'spearmans_lso_relfeats_' + source + '_' + model + '_' + feats + '.dict')):
return
spearmans = {}
expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats,
dset=dset)
relevant_feature_indexes = list(faster_relevant_feats(source, dset=dset))
# Select only weights that correspond to relevant features for the given source
for expid, weights_in, weights_out in zip(expids, all_weights_in, all_weights_out):
        # only do the Spearman test on the relevant feature weights
spearmanr, pvalue = test_ranking(weights_in[relevant_feature_indexes], weights_out[relevant_feature_indexes])
spearmans[expid] = (spearmanr, pvalue)
print expid, spearmanr, pvalue
with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset,
'spearmans_lso_relfeats_' + source + '_' + model + '_' + feats + '.dict'), 'wb') as writer:
pickle.dump(spearmans, writer, protocol=pickle.HIGHEST_PROTOCOL)
def plot_spearman_coefs_all_sources(dir, model='logreg3', feats='ecfps1', dset='bcrp'):
big_dict = {}
# list all spearman files
for f in glob.glob(op.join(dir, 'spearmans_lso_*')):
        if 'relfeats' not in op.basename(f):
source = op.basename(f).partition('_lso_')[2].partition('_logreg')[0]
print source
with open(f, 'rb') as reader:
dict_spearman = pickle.load(reader)
spearmans = map(lambda x: x[0], dict_spearman.values())
big_dict[source] = spearmans
df = pd.DataFrame.from_dict(big_dict)
tidy_df = pd.melt(df, var_name='source', value_name='Spearman correlation coefficient')
import seaborn
seaborn.set_style("ticks")
seaborn.set_context("talk")
seaborn.boxplot('source', 'Spearman correlation coefficient', data=tidy_df)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
plt.xlabel('Source')
plt.ylabel('Spearman correlation of feature weights')
plt.ylim([0,1])
plt.show()
def plot_spearman_only_relevant_feats_all_sources(dir, model='logreg3', feats='ecfps1', dset='bcrp'):
big_dict = {}
# list all spearman files
for f in glob.glob(op.join(dir, 'spearmans_*')):
if 'relfeats' in op.basename(f):
source = op.basename(f).partition('_lso_relfeats_')[2].partition('_logreg')[0]
print source
with open(f, 'rb') as reader:
dict_spearman = pickle.load(reader)
spearmans = map(lambda x: x[0], dict_spearman.values())
big_dict[source] = spearmans
df = pd.DataFrame.from_dict(big_dict)
tidy_df = pd.melt(df, var_name='source', value_name='Spearman correlation coefficient')
import seaborn
seaborn.set_style("ticks")
seaborn.set_context("talk")
seaborn.boxplot('source', 'Spearman correlation coefficient', data=tidy_df)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
plt.title('Spearman correlations across 4096 experiments, '
'\nchecking the weights of the relevant features\nwhen the source is in training or in test')
plt.xlabel('Source')
plt.ylabel('Spearman correlation of feature weights')
plt.ylim([0,1])
plt.show()
def paired_ttest(weights_in, weights_out):
# TODO: also do it with the bayesian approach
# Null hypothesis: there is no weight difference.
from scipy.stats import ttest_1samp
differences = weights_in - weights_out
return ttest_1samp(differences, 0) # returns t and the p-value associated
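# Usage sketch (hypothetical weights measured over three expids):
#   t, p = paired_ttest(np.array([0.12, 0.31, 0.25]), np.array([0.10, 0.29, 0.20]))
#   a small p-value would reject the null hypothesis of no weight difference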
def overall_ttest(source, model='logreg3', feats='ecfps1', dset='bcrp'):
ttests = {}
expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats,
dset=dset)
for expid, weights_in, weights_out in zip(expids, all_weights_in, all_weights_out):
t, pvalue = paired_ttest(weights_in, weights_out)
ttests[expid] = (t, pvalue)
print expid, t, pvalue
with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'paired_ttest_lso_' + source + '_' + model + '_' + feats + '.dict'), 'wb') as writer:
pickle.dump(ttests, writer, protocol=pickle.HIGHEST_PROTOCOL)
def ttest_per_substructure(source, model='logreg3', feats='ecfps1', dset='bcrp'):
ttests = {}
expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats,
dset=dset)
print np.array(all_weights_in).shape, np.array(all_weights_out).shape
i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
all_weights_in = list(np.array(all_weights_in).T)
all_weights_out = list(np.array(all_weights_out).T)
print len(all_weights_in), len(all_weights_out)
for i, weights_in in enumerate(all_weights_in):
ttests[i2s[i]] = paired_ttest(weights_in, all_weights_out[i])
if i%10 == 0:
print i2s[i], weights_in
with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'paired_ttest_lso_bysubstruct_' + source + '_' + model + '_' + feats + '.dict'), 'wb') as writer:
pickle.dump(ttests, writer, protocol=pickle.HIGHEST_PROTOCOL)
def do_job_question_3(source, model='logreg3', feats='ecfps1', dset='bcrp', significant=0.01):
# I really should make it faster
# Read the t-test per substructure file
significant_substructures = []
with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'paired_ttest_lso_bysubstruct_' + source + '_' + model + '_' + feats + '.dict'), 'rb') as reader:
ttests = pickle.load(reader)
for substructure, ttest_res in ttests.iteritems():
if ttest_res[1] <= significant:
if substructure_appears_in_source(substructure, source, dataset_nickname=dset) != (0,0):
print substructure, ttest_res[1]
significant_substructures.append(substructure)
return significant_substructures
def analyse_most_changing_substructs(source, dset='bcrp', model='logreg3', feats='ecfps1', top=10,
temp_dir='/home/floriane/Desktop'):
"""
    Check which substructures are in the source among all those that showed important changes in weight. Plots
    the substructures using SmartsViewer.
"""
substructs_dict = most_changing_substructs_source(dset, model=model, feats=feats, source=source, top=top)
distinct_subs = len(substructs_dict)
indices_in_source = list(faster_relevant_feats(source, dset))
i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
substructs_in_source = i2s[indices_in_source]
in_source_and_changing = [sub for sub in substructs_dict.keys() if sub in substructs_in_source ]
print in_source_and_changing
print "Proportion of substructures that most change in weight that actually appear in %s: %.2f" % \
(source, float(len(in_source_and_changing))/distinct_subs)
from chemdeco.integration import smartsviewer_utils
from PIL import Image
for substr in in_source_and_changing:
if len(substr) > 1: # let's remove the C and c...
svr = smartsviewer_utils.SmartsViewerRunner(w=200, h=200)
svr.depict(substr, op.join(temp_dir, substr + '.png'))
ims = [Image.open(f) for f in glob.glob(op.join(temp_dir, '*.png'))]
num_lines = math.ceil(float(len(ims))/4)
blank_image = Image.new("RGB", (800, int(num_lines*200)), color='white')
for i, im in enumerate(ims):
im.thumbnail((200,200), Image.ANTIALIAS)
blank_image.paste(im, (200 * (i%4), 200 * (i/4)))
blank_image.save(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'substructs_in_%s_max_change_weight_%s_%s.png'
%(source, model, feats)))
# TODO: automatically remove the images from the temp_dir
def barplot_most_changing_substructs(dset='bcrp', model='logreg3', feats='ecfps1', top=10):
"""
    Plots a 2-bar-per-source bar plot: the first bar shows the number of substructures that most changed weight in
    the in / out experiments; the second bar shows how many of those substructures actually occur in the
    source.
"""
values_total = []
values_insource = []
sources = ManysourcesDataset(dset='bcrp').molecules().present_sources()
values_dict = {}
for source in sources:
print source
substructs_dict = most_changing_substructs_source(dset, model=model, feats=feats, source=source, top=top)
values_total.append(len(substructs_dict))
indices_in_source = list(faster_relevant_feats(source, dset))
i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
substructs_in_source = i2s[indices_in_source]
in_source_and_changing = [sub for sub in substructs_dict.keys() if sub in substructs_in_source]
values_insource.append(len(in_source_and_changing))
values_dict['source'] = list(sources)
values_dict['total'] = values_total
values_dict['in source'] = values_insource
ind = np.arange(len(sources)) # the x locations for the groups
width = 0.35 # the width of the bars
import seaborn
seaborn.set_style("ticks")
seaborn.set_context("talk")
seaborn.set_palette('deep')
fig, ax = plt.subplots()
rects1 = ax.bar(ind, values_dict['total'], width, color='0.75')
rects2 = ax.bar(ind+width, values_dict['in source'], width)
ax.legend((rects1[0], rects2[0]), ('Total', 'In source only') )
locs, labels = plt.xticks(map(lambda x: x + width, ind), values_dict['source'])
plt.setp(labels, rotation=90)
plt.xlabel('Source')
plt.ylabel('Number of substructures most changing weight')
plt.ylim([0,320])
plt.show()
def losses_correl_weight_changes(dset='bcrp', model='logreg3', feats='ecfps1', expids=tuple(range(4096)), calib="'0.1'"):
"""
    Plots the correlation between the average loss per source and the average Spearman correlation (over relevant features) per source.
"""
# Copied from Santi but then changed a bit to fit my needs. Reads the losses for the given model
def read_cached():
cache_path = op.join(MANYSOURCES_ROOT, 'data', 'results', 'square_losses.h5')
result_coords = '/dset=bcrp/feats=ecfps1/model=logreg3/lso=True/score_calibration=\'0-1\''
with h5py.File(cache_path, 'r') as h5:
group = h5[result_coords]
infile_expids = group['expids'][()] if expids is not None else expids
if 0 == len(set(expids) - set(infile_expids[:, 0])):
e2r = {e: i for e, i in infile_expids if i >= 0}
ok_expids = [expid for expid in expids if expid in e2r]
rows = [e2r[expid] for expid in ok_expids]
losses = group['losses'][rows].T
molids = group['molids'][:]
return pd.DataFrame(losses, columns=ok_expids, index=molids)
losses = read_cached()
molids = list(losses.index)
#print molids
equivalence_to_source = ManysourcesDataset(dset).mols().molids2sources(molids)
losses['source'] = equivalence_to_source
df_mean_loss = losses.groupby('source').mean() # average loss per source per expid
dict_mean_loss = defaultdict(float)
for src in df_mean_loss.index:
dict_mean_loss[src] = np.array(list(df_mean_loss.loc[src])).mean()
df_mean_loss = pd.DataFrame.from_dict(dict_mean_loss, orient='index')
df_mean_loss.columns = ['average_loss']
big_dict = {}
# list all spearman files
for f in glob.glob(op.join(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'), 'spearmans_*')):
if 'relfeats' in op.basename(f):
source = op.basename(f).partition('_lso_relfeats_')[2].partition('_logreg')[0]
#print source
with open(f, 'rb') as reader:
dict_spearman = pickle.load(reader)
spearmans = map(lambda x: x[0], dict_spearman.values())
big_dict[source] = np.array(spearmans).mean()
df_mean_loss['spearmans'] = [big_dict[source] for source in df_mean_loss.index]
print df_mean_loss
# plot correlation
import seaborn
seaborn.set_style("ticks")
seaborn.set_context("talk")
seaborn.set_palette('deep')
seaborn.set_context(rc={'lines.markeredgewidth': 0.1}) # otherwise we only see regression line, see
#http://stackoverflow.com/questions/26618339/new-version-of-matplotlib-with-seaborn-line-markers-not-functioning
seaborn.lmplot('spearmans', 'average_loss', df_mean_loss, scatter_kws={"marker": ".", 's': 50})
#plt.scatter([big_dict[src] for src in sources], [dict_mean_loss[src] for src in sources])
plt.xlabel('Spearman coefficient correlation of feature importances')
plt.ylabel('Average loss when source is in external set')
plt.title('Absence of correlation between hardness to predict (high loss) \nand high change in feature weights '
'at the source level')
plt.show()
def relationship_spearman_size_source(dir, model='logreg3', feats='ecfps1', dset='bcrp'):
"""
Plots the relationship between the size of the source vs the average relevant Spearman corr coeff. One point per
source on the plot.
"""
small_dict = defaultdict(list)
# list all spearman files
for f in glob.glob(op.join(dir, 'spearmans_*')):
if 'relfeats' in op.basename(f):
source = op.basename(f).partition('_lso_relfeats_')[2].partition('_logreg')[0]
print source
small_dict['source'].append(source)
small_dict['size'].append(len(ManysourcesDataset(dset).mols().sources2molids([source])))
with open(f, 'rb') as reader:
dict_spearman = pickle.load(reader)
spearmans = map(lambda x: x[0], dict_spearman.values())
small_dict['average spearman'].append(np.mean(np.array(spearmans)))
df = pd.DataFrame.from_dict(small_dict)
import seaborn
seaborn.set_style("ticks")
seaborn.set_context("talk")
seaborn.lmplot('size', 'average spearman', data=df, scatter_kws={"marker": "o", "color": "slategray"},
line_kws={"linewidth": 1, "color": "seagreen"})
plt.show()
if __name__ == '__main__':
#sources = ManysourcesDataset(dset='hERG').molecules().present_sources()
#for source in sources:
# print source
# most_changing_substructs_source(source=source, dset='bcrp', top=10)
#ttest_per_substructure(source)
#plot_spearman_coefs_all_sources(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'))
#do_job_question_3('Zembruski_2011')
#cache_relevant_features_all_sources()
#plot_spearman_only_relevant_feats_all_sources(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'))
#plot_spearman_coefs_all_sources(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'))
#barplot_most_changing_substructs()
#proportion_relevant_features('flavonoids_Zhang_2004')
#print substructures_change_weight('flavonoids_Zhang_2004')
#relationship_spearman_size_source(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'))
source = 'Ochoa-Puentes_2011'
analyse_most_changing_substructs(source, dset='bcrp')
| bsd-3-clause |
MazamaScience/ispaq | ispaq/concierge.py | 1 | 32479 | """
ISPAQ Data Access Expediter.
:copyright:
Mazama Science
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function)
import os
import re
import glob
import pandas as pd
import obspy
from obspy.clients.fdsn import Client
from obspy.clients.fdsn.header import URL_MAPPINGS
# ISPAQ modules
from .user_request import UserRequest
from . import irisseismic
# Custom exceptions
class NoAvailableDataError(Exception):
"""No matching data are available."""
class Concierge(object):
"""
ISPAQ Data Access Expediter.
:type user_request: :class:`~ispaq.concierge.user_request`
:param user_request: User request containing the combination of command-line
arguments and information from the parsed user preferences file.
:rtype: :class:`~ispaq.concierge` or ``None``
:return: ISPAQ Concierge.
.. rubric:: Example
TODO: include doctest examples
"""
def __init__(self, user_request=None, logger=None):
"""
Initializes the ISPAQ data access expediter.
See :mod:`ispaq.concierge` for all parameters.
"""
# Keep the entire UserRequest and logger
self.user_request = user_request
self.logger = logger
        # Copy important UserRequest properties to the Concierge for simpler access
self.requested_starttime = user_request.requested_starttime
self.requested_endtime = user_request.requested_endtime
self.metric_names = user_request.metrics
self.sncl_patterns = user_request.sncls
self.function_by_logic = user_request.function_by_logic
self.logic_types = user_request.function_by_logic.keys()
# Individual elements from the Preferences: section of the preferences file
self.csv_output_dir = user_request.csv_output_dir
self.plot_output_dir = user_request.plot_output_dir
self.sigfigs = user_request.sigfigs
# Output information
file_base = '%s_%s_%s' % (self.user_request.requested_metric_set,
self.user_request.requested_sncl_set,
self.requested_starttime.date)
self.output_file_base = self.csv_output_dir + '/' + file_base
# Availability dataframe is stored if it is read from a local file
self.availability = None
# Filtered availability dataframe is stored for potential reuse
self.filtered_availability = None
# Add dataselect clients and URLs or reference a local file
if user_request.dataselect_url in URL_MAPPINGS.keys():
# Get data from FDSN dataselect service
self.dataselect_url = URL_MAPPINGS[user_request.dataselect_url]
self.dataselect_client = Client(user_request.dataselect_url)
else:
if os.path.exists(os.path.abspath(user_request.dataselect_url)):
# Get data from local miniseed files
self.dataselect_url = os.path.abspath(user_request.dataselect_url)
self.dataselect_client = None
else:
err_msg = "Cannot find preference file dataselect_url: '%s'" % user_request.dataselect_url
self.logger.error(err_msg)
raise ValueError(err_msg)
# Add event clients and URLs or reference a local file
if user_request.event_url in URL_MAPPINGS.keys():
self.event_url = URL_MAPPINGS[user_request.event_url]
self.event_client = Client(user_request.event_url)
else:
if os.path.exists(os.path.abspath(user_request.event_url)):
# Get data from local QUAKEML files
self.event_url = os.path.abspath(user_request.event_url)
self.event_client = None
else:
err_msg = "Cannot find preference file event_url: '%s'" % user_request.event_url
self.logger.error(err_msg)
raise ValueError(err_msg)
# Add station clients and URLs or reference a local file
if user_request.station_url in URL_MAPPINGS.keys():
self.station_url = URL_MAPPINGS[user_request.station_url]
self.station_client = Client(user_request.station_url)
else:
if os.path.exists(os.path.abspath(user_request.station_url)):
# Get data from local StationXML files
self.station_url = os.path.abspath(user_request.station_url)
self.station_client = None
else:
err_msg = "Cannot find preference file station_url: '%s'" % user_request.station_url
self.logger.error(err_msg)
raise ValueError(err_msg)
def get_availability(self,
network=None, station=None, location=None, channel=None,
starttime=None, endtime=None, includerestricted=None,
latitude=None, longitude=None, minradius=None, maxradius=None):
"""
################################################################################
# getAvailability method returns a dataframe with information from the output
# of the fdsn station web service with "format=text&level=channel".
# With additional parameters, this webservice returns information on all
# matching SNCLs that have available data.
#
# The fdsnws/station/availability web service will return space characters for location
# codes that are SPACE SPACE.
#
# http://service.iris.edu/fdsnws/station/1/
#
# #Network | Station | Location | Channel | Latitude | Longitude | Elevation | Depth | Azimuth | Dip | Instrument | Scale | ScaleFreq | ScaleUnits | SampleRate | StartTime | EndTime
# CU|ANWB|00|LHZ|17.66853|-61.78557|39.0|0.0|0.0|-90.0|Streckeisen STS-2 Standard-gain|2.43609E9|0.05|M/S|1.0|2010-02-10T18:35:00|2599-12-31T23:59:59
#
################################################################################
if (!isGeneric("getAvailability")) {
setGeneric("getAvailability", function(obj, network, station, location, channel,
starttime, endtime, includerestricted,
latitude, longitude, minradius, maxradius) {
standardGeneric("getAvailability")
})
}
# END of R documentation
Returns a dataframe of SNCLs available from the `station_url` source
specified in the `user_request` object used to initialize the
`Concierge`.
By default, information in the `user_request` is used to generate
a FDSN webservices request for station data. Where arguments are
provided, these are used to override the information found in
        `user_request`.
:type network: str
:param network: Select one or more network codes. Can be SEED network
codes or data center defined codes. Multiple codes are
comma-separated.
:type station: str
:param station: Select one or more SEED station codes. Multiple codes
are comma-separated.
:type location: str
:param location: Select one or more SEED location identifiers. Multiple
identifiers are comma-separated. As a special case ``"--"`` (two
dashes) will be translated to a string of two space characters to
match blank location IDs.
:type channel: str
:param channel: Select one or more SEED channel codes. Multiple codes
are comma-separated.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param starttime: Limit to metadata epochs starting on or after the
specified start time.
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param endtime: Limit to metadata epochs ending on or before the
specified end time.
:type includerestricted: bool
:param includerestricted: Specify if results should include information
for restricted stations.
:type latitude: float
:param latitude: Specify the latitude to be used for a radius search.
:type longitude: float
:param longitude: Specify the longitude to the used for a radius
search.
:type minradius: float
:param minradius: Limit results to stations within the specified
minimum number of degrees from the geographic point defined by the
latitude and longitude parameters.
:type maxradius: float
:param maxradius: Limit results to stations within the specified
maximum number of degrees from the geographic point defined by the
latitude and longitude parameters.
#.. rubric:: Example
#>>> my_request = UserRequest(dummy=True)
#>>> concierge = Concierge(my_request)
#>>> concierge.get_availability() #doctest: +ELLIPSIS
#[u'US.OXF..BHE', u'US.OXF..BHN', u'US.OXF..BHZ']
"""
# NOTE: Building the availability dataframe from a large StationXML is time consuming.
# NOTE: If we are using local station data then we should only do this once.
        # Special case when using all defaults helps speed up any metrics making multiple calls to get_availability
if (network is None and
station is None and
location is None and
channel is None and
starttime is None and
endtime is None and
self.filtered_availability is not None):
return(self.filtered_availability)
# Read from a local StationXML file one time only
if self.station_client is None:
# Only read/parse if we haven't already done so
if self.availability is None:
try:
self.logger.info("Reading StationXML file %s" % self.station_url)
sncl_inventory = obspy.read_inventory(self.station_url)
except Exception as e:
err_msg = "The StationXML file: '%s' is not valid" % self.station_url
self.logger.debug(e)
self.logger.error(err_msg)
raise ValueError(err_msg)
self.logger.debug('Building availability dataframe...')
# Set up empty dataframe
df = pd.DataFrame(columns=("network", "station", "location", "channel",
"latitude", "longitude", "elevation", "depth" ,
"azimuth", "dip", "instrument",
"scale", "scalefreq", "scaleunits", "samplerate",
"starttime", "endtime", "snclId"))
# Walk through the Inventory object
for n in sncl_inventory.networks:
for s in n.stations:
for c in s.channels:
snclId = n.code + "." + s.code + "." + c.location_code + "." + c.code
df.loc[len(df)] = [n.code, s.code, c.location_code, c.code,
c.latitude, c.longitude, c.elevation, c.depth,
c.azimuth, c.dip, c.sensor.description,
None, # TODO: Figure out how to get instrument 'scale'
None, # TODO: Figure out how to get instrument 'scalefreq'
None, # TODO: Figure out how to get instrument 'scaleunits'
c.sample_rate,
c.start_date, c.end_date, snclId]
# Save this dataframe internally
self.logger.debug('Finished creating availability dataframe')
self.availability = df
# Container for all of the individual sncl_pattern dataframes generated
sncl_pattern_dataframes = []
# Loop through all sncl_patterns ---------------------------------------
for sncl_pattern in self.sncl_patterns:
# Get "User Reqeust" parameters
(UR_network, UR_station, UR_location, UR_channel) = sncl_pattern.split('.')
# Allow arguments to override UserRequest parameters
if starttime is None:
_starttime = self.requested_starttime
else:
_starttime = starttime
if endtime is None:
_endtime = self.requested_endtime
else:
_endtime = endtime
if network is None:
_network = UR_network
else:
_network = network
if station is None:
_station = UR_station
else:
_station = station
if location is None:
_location = UR_location
else:
_location = location
if channel is None:
_channel = UR_channel
else:
_channel = channel
_sncl_pattern = "%s.%s.%s.%s" % (_network,_station,_location,_channel)
# Get availability dataframe ---------------------------------------
if self.station_client is None:
# Use internal dataframe
df = self.availability
else:
# Read from FDSN web services
try:
sncl_inventory = self.station_client.get_stations(starttime=_starttime, endtime=_endtime,
network=_network, station=_station,
location=_location, channel=_channel,
includerestricted=None,
latitude=latitude, longitude=longitude,
minradius=minradius, maxradius=maxradius,
level="channel")
except Exception as e:
err_msg = "No sncls matching %s found at %s" % (_sncl_pattern, self.station_url)
self.logger.debug(e)
self.logger.warning(err_msg)
continue
self.logger.debug('Building availability dataframe...')
# Set up empty dataframe
df = pd.DataFrame(columns=("network", "station", "location", "channel",
"latitude", "longitude", "elevation", "depth" ,
"azimuth", "dip", "instrument",
"scale", "scalefreq", "scaleunits", "samplerate",
"starttime", "endtime", "snclId"))
# Walk through the Inventory object
for n in sncl_inventory.networks:
for s in n.stations:
for c in s.channels:
snclId = n.code + "." + s.code + "." + c.location_code + "." + c.code
df.loc[len(df)] = [n.code, s.code, c.location_code, c.code,
c.latitude, c.longitude, c.elevation, c.depth,
c.azimuth, c.dip, c.sensor.description,
None, # TODO: Figure out how to get instrument 'scale'
None, # TODO: Figure out how to get instrument 'scalefreq'
None, # TODO: Figure out how to get instrument 'scaleunits'
c.sample_rate,
c.start_date, c.end_date, snclId]
# Subset availability dataframe based on _sncl_pattern -------------
# NOTE: This shouldn't be necessary for dataframes obtained from FDSN
# NOTE: but it's quick so we always do it
# Create python regex from _sncl_pattern
# NOTE: Replace '.' first before introducing '.*' or '.'!
py_pattern = _sncl_pattern.replace('.','\\.').replace('*','.*').replace('?','.')
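            # e.g. the pattern 'US.OXF.*.BH?' becomes the regex 'US\.OXF\..*\.BH.'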
# Filter dataframe
df = df[df.snclId.str.contains(py_pattern)]
# Subset based on locally available data ---------------------------
if self.dataselect_client is None:
filename = '%s.%s.%s.%s.%s' % (_network, _station, _location, _channel, _starttime.strftime('%Y.%j'))
filepattern = self.dataselect_url + '/' + filename + '*' # Allow for possible quality codes
matching_files = glob.glob(filepattern)
if (len(matching_files) == 0):
err_msg = "No local waveforms matching %s" % filepattern
self.logger.debug(err_msg)
continue
else:
# Create a mask based on available file names
mask = df.snclId.str.contains("MASK WITH ALL FALSE")
for i in range(len(matching_files)):
basename = os.path.basename(matching_files[i])
match = re.match('[^\\.]*\\.[^\\.]*\\.[^\\.]*\\.[^\\.]*',basename)
sncl = match.group(0)
py_pattern = sncl.replace('.','\\.')
mask = mask | df.snclId.str.contains(py_pattern)
# Subset based on the mask
df = df[mask]
# Append this dataframe
if df.shape[0] == 0:
self.logger.debug("No SNCLS found matching '%s'" % _sncl_pattern)
else:
sncl_pattern_dataframes.append(df)
# END of sncl_patterns loop --------------------------------------------
if len(sncl_pattern_dataframes) == 0:
err_msg = "No available waveforms matching" + str(self.sncl_patterns)
self.logger.info(err_msg)
raise NoAvailableDataError(err_msg)
else:
availability = pd.concat(sncl_pattern_dataframes, ignore_index=True)
# TODO: remove duplicates
if availability.shape[0] == 0:
err_msg = "No available waveforms matching" + str(self.sncl_patterns)
self.logger.info(err_msg)
raise NoAvailableDataError(err_msg)
else:
# The concierge should remember this dataframe for metrics that
# make multiple calls to get_availability with all defaults.
self.filtered_availability = availability
return availability
def get_dataselect(self,
network=None, station=None, location=None, channel=None,
starttime=None, endtime=None, quality="B",
inclusiveEnd=True, ignoreEpoch=False):
"""
Returns an R Stream that can be passed to metrics calculation methods.
All arguments are required except for starttime and endtime. These arguments
may be specified but will default to the time information found in the
`user_request` used to generate a FDSN webservices request for MINIseed data.
:type network: str
:param network: Select one or more network codes. Can be SEED network
codes or data center defined codes. Multiple codes are
comma-separated.
:type station: str
:param station: Select one or more SEED station codes. Multiple codes
are comma-separated.
:type location: str
:param location: Select one or more SEED location identifiers. Multiple
identifiers are comma-separated. As a special case ``"--"`` (two
dashes) will be translated to a string of two space characters to
match blank location IDs.
:type channel: str
:param channel: Select one or more SEED channel codes. Multiple codes
are comma-separated.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param starttime: Limit to metadata epochs starting on or after the
specified start time.
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param endtime: Limit to metadata epochs ending on or before the
specified end time.
"""
# Allow arguments to override UserRequest parameters
if starttime is None:
_starttime = self.requested_starttime
else:
_starttime = starttime
if endtime is None:
_endtime = self.requested_endtime
else:
_endtime = endtime
if self.dataselect_client is None:
# Read local MINIseed file and convert to R_Stream
filename = '%s.%s.%s.%s.%s' % (network, station, location, channel, _starttime.strftime('%Y.%j'))
filepattern = self.dataselect_url + '/' + filename + '*' # Allow for possible quality codes
matching_files = glob.glob(filepattern)
if (len(matching_files) == 0):
self.logger.info("No files found matching '%s'" % (filepattern))
else:
filepath = matching_files[0]
if (len(matching_files) > 1):
self.logger.warning("Multiple files found matching" '%s -- using %s' % (filepattern, filepath))
try:
# Get the ObsPy version of the stream
py_stream = obspy.read(filepath)
py_stream = py_stream.slice(_starttime, _endtime)
# NOTE: ObsPy does not store state-of-health flags with each stream.
# NOTE: We need to read them in separately from the miniseed file.
flag_dict = obspy.io.mseed.util.get_timing_and_data_quality(filepath)
act_flags = [0,0,0,0,0,0,0,0] # TODO: Find a way to read act_flags
io_flags = [0,0,0,0,0,0,0,0] # TODO: Find a way to read io_flags
dq_flags = flag_dict['data_quality_flags']
# NOTE: ObsPy does not store station metadata with each trace.
# NOTE: We need to read them in separately from station metadata.
availability = self.get_availability(network, station, location, channel, _starttime, _endtime)
sensor = availability.instrument[0]
scale = availability.scale[0]
scalefreq = availability.scalefreq[0]
scaleunits = availability.scaleunits[0]
if sensor is None: sensor = "" # default from IRISSeismic Trace class prototype
if scale is None: scale = 1.0 # default from IRISSeismic Trace class prototype
if scalefreq is None: scalefreq = 1.0 # default from IRISSeismic Trace class prototype
if scaleunits is None: scaleunits = "" # default from IRISSeismic Trace class prototype
latitude = availability.latitude[0]
longitude = availability.longitude[0]
elevation = availability.elevation[0]
depth = availability.depth[0]
azimuth = availability.azimuth[0]
dip = availability.dip[0]
# Create the IRISSeismic version of the stream
r_stream = irisseismic.R_Stream(py_stream, _starttime, _endtime, act_flags, io_flags, dq_flags,
sensor, scale, scalefreq, scaleunits, latitude, longitude, elevation, depth, azimuth, dip)
except Exception as e:
err_msg = "Error reading in local waveform from %s" % filepath
self.logger.debug(e)
self.logger.error(err_msg)
raise
else:
# Read from FDSN web services
try:
r_stream = irisseismic.R_getDataselect(self.dataselect_url, network, station, location, channel, _starttime, _endtime, quality, inclusiveEnd, ignoreEpoch)
except Exception as e:
err_msg = "Error reading in waveform from %s webservice" % self.dataselect_client
self.logger.debug(e)
self.logger.error(err_msg)
raise
# TODO: Do we need to test for valid R_Stream.
if False:
return None # TODO: raise an exception
else:
return r_stream
def get_event(self,
starttime=None, endtime=None,
minmag=5.5, maxmag=None, magtype=None,
mindepth=None, maxdepth=None):
"""
################################################################################
# getEvent method returns seismic event data from the event webservice:
#
# http://service.iris.edu/fdsnws/event/1/
#
# TODO: The getEvent method could be fleshed out with a more complete list
# TODO: of arguments to be used as ws-event parameters.
################################################################################
# http://service.iris.edu/fdsnws/event/1/query?starttime=2013-02-01T00:00:00&endtime=2013-02-02T00:00:00&minmag=5&format=text
#
# #EventID | Time | Latitude | Longitude | Depth | Author | Catalog | Contributor | ContributorID | MagType | Magnitude | MagAuthor | EventLocationName
# 4075900|2013-02-01T22:18:33|-11.12|165.378|10.0|NEIC|NEIC PDE|NEIC PDE-Q||MW|6.4|GCMT|SANTA CRUZ ISLANDS
if (!isGeneric("getEvent")) {
setGeneric("getEvent", function(obj, starttime, endtime, minmag, maxmag, magtype,
mindepth, maxdepth) {
standardGeneric("getEvent")
})
}
# END of R documentation
Returns a dataframe of events returned by the `event_url` source
specified in the `user_request` object used to initialize the
`Concierge`.
By default, information in the `user_request` is used to generate
a FDSN webservices request for event data. Where arguments are
provided, these are used to override the information found in
        `user_request`.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param starttime: Limit to metadata epochs starting on or after the
specified start time.
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param endtime: Limit to metadata epochs ending on or before the
specified end time.
:type minmagnitude: float, optional
:param minmagnitude: Limit to events with a magnitude larger than the
specified minimum.
:type maxmagnitude: float, optional
:param maxmagnitude: Limit to events with a magnitude smaller than the
specified maximum.
:type magnitudetype: str, optional
:param magnitudetype: Specify a magnitude type to use for testing the
minimum and maximum limits.
:type mindepth: float, optional
:param mindepth: Limit to events with depth, in kilometers, larger than
the specified minimum.
:type maxdepth: float, optional
:param maxdepth: Limit to events with depth, in kilometers, smaller
than the specified maximum.
#.. rubric:: Example
#>>> my_request = UserRequest(dummy=True)
#>>> concierge = Concierge(my_request)
#>>> concierge.get_event() #doctest: +ELLIPSIS
'
eventId time latitude longitude depth author...'
"""
# Allow arguments to override UserRequest parameters
if starttime is None:
_starttime = self.requested_starttime
else:
_starttime = starttime
if endtime is None:
_endtime = self.requested_endtime
else:
_endtime = endtime
if self.event_client is None:
# Read local QuakeML file
try:
event_catalog = obspy.read_events(self.event_url)
except Exception as e:
err_msg = "The StationXML file: '%s' is not valid." % self.station_url
self.logger.debug(e)
self.logger.error(err_msg)
raise ValueError(err_msg)
# events.columns
# Index([u'eventId', u'time', u'latitude', u'longitude', u'depth', u'author',
# u'cCatalog', u'contributor', u'contributorId', u'magType', u'magnitude',
# u'magAuthor', u'eventLocationName'],
# dtype='object')
#
dataframes = []
for event in event_catalog:
origin = event.preferred_origin()
magnitude = event.preferred_magnitude()
df = pd.DataFrame({'eventId': re.sub('.*eventid=','',event.resource_id.id),
'time': origin.time,
'latitude': origin.latitude,
'longitude': origin.longitude,
'depth': origin.depth/1000, # IRIS event webservice returns depth in km # TODO: check this
'author': origin.creation_info.author,
'cCatalog': None,
'contributor': None,
'contributorId': None,
'magType': magnitude.magnitude_type,
'magnitude': magnitude.mag,
'magAuthor': magnitude.creation_info.author,
'eventLocationName': event.event_descriptions[0].text},
index=[0])
dataframes.append(df)
# Concatenate into the events dataframe
events = pd.concat(dataframes, ignore_index=True)
else:
# Read from FDSN web services
# TODO: Need to make sure irisseismic.getEvent uses any FDSN site
try:
events = irisseismic.getEvent(starttime=_starttime,
endtime=_endtime,
minmag=minmag,
maxmag=maxmag,
magtype=magtype,
mindepth=mindepth,
maxdepth=maxdepth)
except Exception as e:
err_msg = "The event_url: '%s' returns an error" % (self.event_url)
self.logger.debug(e)
self.logger.error(err_msg)
raise
if events.shape[0] == 0:
return None # TODO: raise an exception
else:
return events
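# A hedged usage sketch (not a doctest; the request and logger objects are assumed to be
# built elsewhere from the command-line arguments and the preferences file):
#
#   concierge = Concierge(user_request=my_request, logger=my_logger)
#   availability = concierge.get_availability()                  # dataframe of SNCLs
#   r_stream = concierge.get_dataselect("US", "OXF", "", "BHZ")  # R Stream for metrics
#   events = concierge.get_event(minmag=6.0)                     # dataframe of events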
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| gpl-3.0 |
jreback/pandas | pandas/tests/frame/constructors/test_from_records.py | 2 | 16550 | from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_little_endian
from pandas import CategoricalIndex, DataFrame, Index, Interval, RangeIndex, Series
import pandas._testing as tm
class TestFromRecords:
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH#6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH#6140
expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[ns]")]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[m]")]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# this is actually tricky to create the recordlike arrays and
# have the dtypes be intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in blocks.items():
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in blocks.items():
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(
columns=df.columns
)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(
columns=df.columns
)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
columns=df.columns
)
        # list of tuples (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(
columns=df.columns
)
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, RangeIndex(8))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index("C"), columns.index("E1")]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result["C"], df["C"])
tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
# empty case
result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
assert len(result) == 0
tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in blocks.items():
columns.extend(b.columns)
asdict = {x: y for x, y in df.items()}
asdict2 = {x: y.values for x, y in df.items()}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(
DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
)
results.append(
DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
)
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
# should pass
df1 = DataFrame.from_records(df, index=["C"])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index="C")
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(df, index=[2])
with pytest.raises(KeyError, match=r"^2$"):
DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
class Record:
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = [tuple(rec) for rec in recs]
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# GH#2633
result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
expected = Index(["bar"])
assert len(result) == 0
assert result.index.name == "foo"
tm.assert_index_equal(result.columns, expected)
def test_from_records_series_list_dict(self):
# GH#27358
expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
result = DataFrame.from_records(data)
tm.assert_frame_equal(result, expected)
def test_from_records_series_categorical_index(self):
# GH#32805
index = CategoricalIndex(
[Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
)
series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
frame = DataFrame.from_records(series_of_dicts, index=index)
expected = DataFrame(
{"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
)
tm.assert_frame_equal(frame, expected)
def test_frame_from_records_utc(self):
rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index="begin_time")
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=("i4,f4,a10"))
arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index="f1")
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert "index" not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"])
assert np.isnan(df["c"][0])
def test_from_records_iterator(self):
arr = np.array(
[(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],
dtype=[
("x", np.float64),
("u", np.float32),
("y", np.int64),
("z", np.int32),
],
)
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame(
{
"x": np.array([1.0, 3.0], dtype=np.float64),
"u": np.array([1.0, 3.0], dtype=np.float32),
"y": np.array([2, 4], dtype=np.int64),
"z": np.array([2, 4], dtype=np.int32),
}
)
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]
df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
yield (i, letters[i % len(letters)], i / length)
columns_names = ["Integer", "String", "Float"]
columns = [
[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))
]
data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
yield [i, letters[i % len(letters)], i / length]
columns_names = ["Integer", "String", "Float"]
columns = [
[i[j] for i in list_generator(10)] for j in range(len(columns_names))
]
data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]
columns = ["a", "b", "c"]
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index="a") # noqa
assert columns == original_columns
def test_from_records_decimal(self):
tuples = [(Decimal("1.5"),), (Decimal("2.5"),), (None,)]
df = DataFrame.from_records(tuples, columns=["a"])
assert df["a"].dtype == object
df = DataFrame.from_records(tuples, columns=["a"], coerce_float=True)
assert df["a"].dtype == np.float64
assert np.isnan(df["a"].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {
"order_id": order_id,
"quantity": np.random.randint(1, 10),
"price": np.random.randint(1, 10),
}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({"order_id": 10, "quantity": 5})
result = DataFrame.from_records(documents, index="order_id")
assert result.index.name == "order_id"
# MultiIndex
result = DataFrame.from_records(documents, index=["order_id", "quantity"])
assert result.index.names == ("order_id", "quantity")
def test_from_records_misc_brokenness(self):
# GH#2179
data = {1: ["foo"], 2: ["bar"]}
result = DataFrame.from_records(data, columns=["a", "b"])
exp = DataFrame(data, columns=["a", "b"])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {"a": [1, 2, 3], "b": [4, 5, 6]}
result = DataFrame.from_records(data, index=["a", "b", "c"])
exp = DataFrame(data, index=["a", "b", "c"])
tm.assert_frame_equal(result, exp)
# GH#2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), "hi"]) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
result = df2_obj.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("object")], index=["date", "test"]
)
tm.assert_series_equal(result, expected)
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
result = df2_obj.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("int64")], index=["date", "test"]
)
tm.assert_series_equal(result, expected)
def test_from_records_empty(self):
# GH#3562
result = DataFrame.from_records([], columns=["a", "b", "c"])
expected = DataFrame(columns=["a", "b", "c"])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=["a", "b", "b"])
expected = DataFrame(columns=["a", "b", "b"])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)])
df = DataFrame.from_records(a, index="id")
tm.assert_index_equal(df.index, Index([1], name="id"))
assert df.index.name == "id"
tm.assert_index_equal(df.columns, Index(["value"]))
b = np.array([], dtype=[("id", np.int64), ("value", np.int64)])
df = DataFrame.from_records(b, index="id")
tm.assert_index_equal(df.index, Index([], name="id"))
assert df.index.name == "id"
| bsd-3-clause |
bthirion/scikit-learn | sklearn/manifold/setup.py | 43 | 1283 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.pyx"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
heli522/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
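# Note added for clarity (not in the original example): each estimator above is
# fitted below inside a pipeline that first expands X with cubic polynomial
# features, so all three methods are compared on the same nonlinear basis.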
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
               title='Error: mean squared error\n on non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
jlegendary/orange | Orange/orng/orngProjectionPursuit.py | 6 | 7987 | import orange
import numpy
import scipy.special
import scipy.optimize
import scipy.stats
from pylab import *
def sqrtm(mat):
    """ Returns the square root of the matrix mat. """
U, S, V = numpy.linalg.svd(mat)
D = numpy.diag(numpy.sqrt(S))
return numpy.dot(numpy.dot(U,D),V)
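# Illustrative self-check (added; not part of the original module). For a
# symmetric positive-definite matrix the SVD-based square root above should
# satisfy sqrtm(S).dot(sqrtm(S)) ~= S. The helper below is a minimal sketch of
# that property and is not called anywhere in this module.
def _sqrtm_selfcheck(n=4):
    a = numpy.random.rand(n, n)
    spd = numpy.dot(a, a.T) + n * numpy.eye(n)   # symmetric positive definite
    root = sqrtm(spd)
    return numpy.allclose(numpy.dot(root, root), spd)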
def standardize(mat):
""" Subtracts means and multiplies by diagonal elements of inverse
square root of covariance matrix.
"""
av = numpy.average(mat, axis=0)
sigma = numpy.corrcoef(mat, rowvar=0)
srSigma = sqrtm(sigma)
isrSigma = numpy.linalg.inv(srSigma)
return (mat-av) * numpy.diag(isrSigma)
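# Added explanatory note (not in the original source): friedman_tmp_func below
# evaluates a one-dimensional Friedman projection-pursuit index. Each row of Z is
# projected onto the direction alpha, pushed through the standard normal CDF
# (scipy.stats.zprob) and rescaled to [-1, 1]; squared Legendre-polynomial moments
# of the result measure its departure from uniformity, and the negative weighted
# sum is returned so that a minimiser (e.g. scipy.optimize.fmin) maximises the index.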
def friedman_tmp_func(alpha, Z=numpy.zeros((1,1)), J=5, n=1):
alpha = numpy.array(alpha)
pols = [scipy.special.legendre(j) for j in range(0,J+1)]
vals0 = [numpy.dot(alpha.T, Z[i,:]) for i in range(n)]
def f_tmp(x): return 2*x-1
vals = map(f_tmp, map(scipy.stats.zprob, vals0))
val = [1./n*sum(map(p, vals))**2 for p in pols]
return vals, pols, - 0.5 * sum([(2*j+1)*v for j, v in enumerate(val)])
class ProjectionPursuit:
FRIEDMAN = 0
MOMENT = 1
SILHUETTE = 2
HARTINGAN = 3
def __init__(self, data, index = FRIEDMAN, dim=2, maxiter=10):
self.dim = dim
if type(data) == orange.ExampleTable:
self.dataNP = data.toNumpy()[0] # TODO: check if conversion of discrete values works ok
else:
self.dataNP = data
self.Z = standardize(self.dataNP)
self.totalSize, self.nVars = numpy.shape(self.Z)
self.maxiter = maxiter
self.currentOptimum = None
self.index = index
def optimize(self, maxiter = 5, opt_method=scipy.optimize.fmin):
func = self.getIndex()
if self.currentOptimum != None:
x = self.currentOptimum
else:
x = numpy.random.rand(self.dim * self.nVars)
alpha = opt_method(func, x, maxiter=maxiter).reshape(self.dim * self.nVars,1)
self.currentOptimum = alpha
print alpha, len(alpha)
optValue = func(alpha)
if self.dim == 2:
alpha1 = alpha[:self.nVars]
alpha2 = alpha[self.nVars:]
alpha = numpy.append(alpha1, alpha2, axis=1)
projectedData = numpy.dot(self.Z, alpha)
return alpha, optValue, projectedData
def find_optimum(self, opt_method=scipy.optimize.fmin):
func = self.getIndex()
alpha = opt_method(func, \
numpy.random.rand(self.dim * self.nVars),\
maxiter=self.maxiter).reshape(self.dim * self.nVars,1)
print alpha, len(alpha)
optValue = func(alpha)
if self.dim == 2:
alpha1 = alpha[:self.nVars]
alpha2 = alpha[self.nVars:]
alpha = numpy.append(alpha1, alpha2, axis=1)
projectedData = numpy.dot(self.Z, alpha)
return alpha, optValue, projectedData
def getIndex(self):
if self.index == self.FRIEDMAN:
return self.getFriedmanIndex()
elif self.index == self.MOMENT:
return self.getMomentIndex()
elif self.index == self.SILHUETTE:
return self.getSilhouetteBasedIndex()
elif self.index == self.HARTINGAN:
return self.getHartinganBasedIndex()
def getFriedmanIndex(self, J=5):
if self.dim == 1:
def func(alpha, Z=self.Z, J=J, n=self.totalSize):
vals, pols, val = friedman_tmp_func(alpha, Z=Z, J=J, n=n)
return val
elif self.dim == 2:
def func(alpha, Z=self.Z, J=J, n=self.totalSize):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
vals1, pols, val1 = friedman_tmp_func(alpha1, Z=Z, J=J, n=n)
vals2, pols, val2 = friedman_tmp_func(alpha2, Z=Z, J=J, n=n)
val12 = - 0.5 * sum([sum([(2*j+1)*(2*k+1)*vals1[j]*vals2[k] for k in range(0, J+1-j)]) \
for j in range(0,J+1)])
## print val1, val2
return 0.5 * (val1 + val2 + val12)
return func
    def getMomentIndex(self): # you can add a factor of 1./12
if self.dim == 1:
def func(alpha):
smpl = numpy.dot(self.Z, alpha)
return scipy.stats.kstat(smpl, n=3) ** 2 + 0.25 * scipy.stats.kstat(smpl, n=4)
else:
print "To do."
return func
def getSilhouetteBasedIndex(self, nClusters=5):
import orngClustering
def func(alpha, nClusters=nClusters):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
alpha1 = alpha1.reshape((self.nVars,1))
alpha2 = alpha2.reshape(self.nVars,1)
alpha = numpy.append(alpha1, alpha2, axis=1)
smpl = numpy.dot(self.Z, alpha)
smpl = orange.ExampleTable(smpl)
km = orngClustering.KMeans(smpl, centroids=nClusters)
score = orngClustering.score_silhouette(km)
return -score
import functools
silhIndex = functools.partial(func, nClusters=nClusters)
return silhIndex
def getHartinganBasedIndex(self, nClusters=5):
import orngClustering
def func(alpha, nClusters=nClusters):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
alpha1 = alpha1.reshape((self.nVars,1))
alpha2 = alpha2.reshape(self.nVars,1)
alpha = numpy.append(alpha1, alpha2, axis=1)
smpl = numpy.dot(self.Z, alpha)
smpl = orange.ExampleTable(smpl)
km1 = orngClustering.KMeans(smpl, centroids=nClusters)
km2 = orngClustering.KMeans(smpl, centroids=nClusters)
score = (self.totalSize - nClusters - 1) * (km1.score-km2.score) / (km2.score)
return -score
import functools
hartinganIndex = functools.partial(func, nClusters=nClusters)
return hartinganIndex
def draw_scatter_hist(x,y, fileName="lala.png"):
from matplotlib.ticker import NullFormatter
nullfmt = NullFormatter() # no labels
clf()
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
figure(1, figsize=(8,8))
axScatter = axes(rect_scatter)
axHistx = axes(rect_histx)
axHisty = axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axScatter.scatter(x, y)
# now determine nice limits by hand:
binwidth = 0.25
xymax = numpy.max([numpy.max(np.fabs(x)), numpy.max(np.fabs(y))])
lim = (int(xymax/binwidth) + 1) * binwidth
axScatter.set_xlim( (-lim, lim) )
axScatter.set_ylim( (-lim, lim) )
bins = numpy.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
savefig(fileName)
if __name__=="__main__":
## data = orange.ExampleTable("c:\\Work\\Subgroup discovery\\iris.tab")
data = orange.ExampleTable(r"E:\Development\Orange Datasets\UCI\iris.tab")
data = data.select(data.domain.attributes)
impmin = orange.ImputerConstructor_minimal(data)
data = impmin(data)
ppy = ProjectionPursuit(data, dim=2, maxiter=100)
#ppy.friedman_index(J=5)
#ppy.silhouette_based_index(nClusters=2)
## import os
## os.chdir("C:\\Work\\Subgroup discovery")
#draw_scatter_hist(ppy.friedmanProjData[:,0], ppy.friedmanProjData[:,1])
#draw_scatter_hist(ppy.silhouetteProjData[:,0], ppy.silhouetteProjData[:,1])
print ppy.optimize()
| gpl-3.0 |
protoplanet/raytracing | ray_main.py | 1 | 8253 | # ------------------------------------------------------------------------------------------ #
# Description : Python implementation of ray tracing equations stated in PhD thesis of Rice (1997)
# Electron/proton stratification according to Yabroff (1961) + IRI model available
# Geometry is 2D polar
#
# Author : Miroslav Mocak
# Date : 14/October/2016
# Usage : run ray_main.py (this code is OPEN-SOURCE and free to be used/modified by anyone)
# References : Rice W.K.M, 1997, "A ray tracing study of VLF phenomena", PhD thesis,
# : Space Physics Research Institute, Department of Physics, University of Natal
# : Yabroff (1961), Kimura (1966)
# ------------------------------------------------------------------------------------------ #
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import ode
import warnings
import re # python regular expressions
import ray_cmks
import ray_fncts
import ray_plot
import sys
warnings.filterwarnings('ignore')
# ---------------------------------------------- #
# READ INPUT PARAMETERS AND INITIAL CONDITIONS
# ---------------------------------------------- #
file=open('ray_param.dat','r')
next(file) # skip header line
next(file) # skip header line
input=[]
for line in file:
prsvalue = re.search(r'\[(.*)\]', line).group(1) # parse out values from square brackets
input.append(prsvalue)
file.close()
freq_in = float(input[0])
freq_en = float(input[1])
freq_de = float(input[2])
orbit = float(input[3])
frr0 = float(input[4])
dth0 = float(input[5])
dchi0 = float(input[6])
tt0 = float(input[7])
tstop = float(input[8])
nsteps = float(input[9])
pvode = float(input[10])
pzvode = float(input[11])
plsoda = float(input[12])
pdopri5 = float(input[13])
pdop853 = float(input[14])
iono_el = float(input[15])
iono_np = float(input[16])
iono_ir = float(input[17])
iri_fna = input[18]
pwhist = float(input[19])
if (iono_el == 1. and iono_np == 1.) or (iono_el == 1. and iono_ir == 1.) or (iono_np == 1. and iono_ir == 1.):
print('STOP (in ray_main.py): choose only one ionospheric model')
sys.exit()
# load constants
ray_cmks.pconstants()
rr0 = frr0*ray_cmks.Re # initial altitude :: approx 300 km = 1.0471*Re
th0 = dth0*np.pi/180.
chi0 = dchi0*np.pi/180.
G0 = tt0
t0 = 0.
dt = tstop/nsteps # calculate integration step
# bundle initial conditions
rtcG0 = [rr0,th0,chi0,G0]
# introduce some error handling !!
# select chosen ionospheric model
ion = ["0","0"]
# initialize ionosphere
height = []
ne = []
H_ions_per = []
O_ions_per = []
He_ions_per = []
O2_ions_per = []
NO_ions_per = []
N_ions_per = []
ionosphere = [height,ne,O_ions_per,H_ions_per,He_ions_per,O2_ions_per,NO_ions_per,N_ions_per]
if iono_el == 1:
fname = ""
ion = ["iono_el",fname]
if iono_np == 1:
fname = ""
ion = ["iono_np",fname]
if iono_ir == 1:
# fname = "iri_2012_24537_night.lst"
# fname = "iri_2012_25962_day.lst"
fname = iri_fna
# print(fname)
ion = ["iono_ir",fname]
height = []
ne = []
O_ions_per = []
H_ions_per = []
He_ions_per = []
O2_ions_per = []
NO_ions_per = []
N_ions_per = []
# Open file
# fname = 'iri_2012_22651_night.lst'
fname = ion[1]
f = open('IRI/'+fname, 'r')
# Loop over lines and extract variables of interest
for line in f:
line = line.strip()
columns = line.split()
# if float(columns[5]) = -1.:
# columns[5] = 0.
if float(columns[8]) < 0.:
columns[8] = 0.
if float(columns[9]) < 0.:
columns[9] = 0.
if float(columns[10]) < 0.:
columns[10] = 0.
if float(columns[11]) < 0.:
columns[11] = 0.
if float(columns[12]) < 0.:
columns[12] = 0.
if float(columns[13]) < 0.:
columns[13] = 0.
if float(columns[14]) < 0.:
columns[14] = 0.
height.append(float(columns[5])) # height in km
ne.append(float(columns[8])) # electron density in m-3
O_ions_per.append(float(columns[9])) # atomic oxygen O+ ions percentage
H_ions_per.append(float(columns[10])) # atomic hydrogen H+ ions percentage
He_ions_per.append(float(columns[11])) # atomic helium He+ ions percentage
O2_ions_per.append(float(columns[12])) # molecular oxygen O2+ ions percentage
NO_ions_per.append(float(columns[13])) # nitric oxide ions NO+ percentage
N_ions_per.append(float(columns[14])) # atomic nitrogen N+ ions percentage
f.close()
if np.asarray(height[-1]) < orbit:
print('STOP (in ray_main.py): limiting orbit exceeds max altitude of IRI model')
sys.exit()
ionosphere = [height,ne,O_ions_per,H_ions_per,He_ions_per,O2_ions_per,NO_ions_per,N_ions_per]
#print(height[0],rr0-6371200.0,height[-1])
if ion == ["0","0"]:
print("Error in ionospheric model")
# ---------------------------------------------- #
# CALCULATE RHS OF RAY TRACING EQUATIONS
# ---------------------------------------------- #
def f(t, rtcG, freq):
rr, th, chi, G = rtcG # unpack current values
c = 2.99792458e8
n = ray_fncts.phase_refractive_index(rr,th,chi,freq,ion,ionosphere)
dndrr = ray_fncts.deriv_rr(rr,th,chi,freq,ion,ionosphere)
dndth = ray_fncts.deriv_th(rr,th,chi,freq,ion,ionosphere)
dndfr = ray_fncts.deriv_fr(rr,th,chi,freq,ion,ionosphere)
dndch = -n[1]
# ngroup = n[0]+freq*dndfr
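    # Added clarifying comment (not in the original source): `derivs` below holds
    # the time derivatives of the state vector rtcG unpacked above, i.e.
    # [dr/dt, d(th)/dt, d(chi)/dt, dG/dt], expressed through the phase refractive
    # index n and its derivatives with respect to r, theta, chi and frequency.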
derivs = [(1./n[0]**2)*(n[0]*np.cos(chi) + dndch*np.sin(chi)),
(1./(rr*n[0]**2))*(n[0]*np.sin(chi) - dndch*np.cos(chi)),
(1./(rr*n[0]**2))*(dndth*np.cos(chi) - (rr*dndrr+n[0])*np.sin(chi)),
(1./c)*(1.+(freq/n[0])*dndfr)]
return derivs
# ---------------------------------------------- #
# MAIN CALLS ODE SOLVER AND STORES RESULTS
# ---------------------------------------------- #
if pvode == 1:
intype="vode"
if pzvode == 1:
intype="zvode"
if plsoda == 1:
intype="lsoda"
if pdopri5 == 1:
intype="dopri5"
if pdop853 == 1:
intype="dop853"
print('Using ODE integrator: '+str(intype))
print('Limiting height: '+str(orbit)+str(' km'))
# set parameters for plotting
ray_plot.SetMatplotlibParams()
fd = freq_de
fend = int((freq_en-freq_in)/freq_de)
# initial array for frequency and group delay time at chosen orbit
freqb = []
gdtb = []
nphaseb = []
for ii in range(1,fend+1):
freq = freq_in+(ii-1)*fd # vary frequency
print('Calculating ray path: '+str("%.2g" % freq)+' Hz')
psoln = ode(f).set_integrator(intype,method='bdf')
psoln.set_initial_value(rtcG0,t0).set_f_params(freq)
radius = []
latitude = []
gdt = []
nphase = []
while psoln.successful() and psoln.t < tstop and psoln.y[0] > ray_cmks.Re and psoln.y[0] < (ray_cmks.Re+orbit*1.e3):
psoln.integrate(psoln.t+dt)
radius.append(psoln.y[0])
latitude.append(psoln.y[1])
gdt.append(psoln.y[3])
nphase_single = ray_fncts.phase_refractive_index(psoln.y[0],psoln.y[1],psoln.y[2],freq,ion,ionosphere)
nphase.append(nphase_single[0])
# print(ray_fncts.phase_refractive_index(psoln.y[0],psoln.y[1],psoln.y[2],freq,ion,ionosphere))
# print(psoln.y[2],(180./np.pi)*psoln.y[2])
xx = radius[:]*np.cos(latitude[:])
yy = radius[:]*np.sin(latitude[:])
freqb.append(freq)
gdtb.append(gdt[-1])
nphaseb.append(nphase)
ray_plot.finPlot(radius,latitude,gdt,freq,dth0,dchi0,ion,ii)
# ---------------------------------------------- #
# ray_plot RESULTS
# ---------------------------------------------- #
if pwhist == 1:
ray_plot.finGdt(radius,latitude,gdtb,freqb,dth0,dchi0,ion)
# ray_plot.finNphase(radius,latitude,gdtb,freqb,nphase,dth0,dchi0,ion)
plt.show()
plt.clf()
# ---------------------------------------------- #
# END
# ---------------------------------------------- #
| gpl-3.0 |
GAMPTeam/vampyre | test/test_sparse/test_mlvamp_probit.py | 1 | 5993 | from __future__ import division
"""
test_vamp_probit.py: tests for ML-VAMP for probit estimation
"""
# Add the path to the vampyre package and import it
import env
env.add_vp_path()
import vampyre as vp
# Add other packages
import numpy as np
import unittest
def debias_mse(zhat,ztrue):
"""
If zhat and ztrue are 1D vectors, the function computes the *debiased normalized MSE* defined as:
    dmse_lin = min_c ||ztrue - c*zhat||^2 / ||ztrue||^2 = 1 - |zhat'*ztrue|^2 / (||ztrue||^2 * ||zhat||^2)
The function returns the value in dB: dmse = 10*log10(dmse_lin)
If zhat and ztrue are matrices, dmse_lin is computed for each column and then averaged over the columns
"""
zcorr = np.abs(np.sum(zhat.conj()*ztrue,axis=0))**2
zhatpow = np.sum(np.abs(zhat)**2,axis=0)
zpow = np.sum(np.abs(ztrue)**2,axis=0)
tol = 1e-8
if np.any(zhatpow < tol) or np.any(zpow < tol):
dmse = 0
else:
dmse = 10*np.log10(np.mean(1 - zcorr/zhatpow/zpow))
return dmse
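# Illustrative sketch (added; not part of the original test module): debias_mse
# above is scale-invariant, so rescaling an estimate must not change the score.
# The helper below is a minimal demonstration and is not used by the tests.
def _debias_mse_demo():
    rng = np.random.RandomState(0)
    ztrue = rng.randn(100)
    zhat = 0.1 * ztrue + 0.01 * rng.randn(100)   # right direction, wrong scale
    # both calls should report (nearly) the same strongly negative dB value
    return debias_mse(zhat, ztrue), debias_mse(5.0 * zhat, ztrue)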
def probit_test(nz0=512,nz1=4096,ncol=10, snr=30, verbose=False, plot=False,\
est_meth='cg', nit_cg=10, mse_tol=-20):
"""
Test VAMP on a sparse probit estimation problem
In this test, the input :math:`z_0` is a Bernoulli-Gaussian and
:math:`z_1=Az_0+w` where :math:`w` is Gaussian noise and :math:`A` is an
i.i.d. Gaussian matrix. The problem is to estimate :math:`z_0` from
binary measurements :math:`y=sign(z_1)`. This is equivalent to sparse
probit estimation in statistics.
:param nz0: number of rows of :math:`z_0`
:param nz1: number of rows of :math:`z_1`
:param ncol: number of columns of :math:`z_1` and :math:`z_0`
:param snr: SNR in dB
:param Boolean verbose: Flag indicating if the test results are
to be printed.
:param Boolean plot: Flag indicating if the test results are
        to be plotted.
:param est_meth: Estimation method. Either `svd` or `cg`
:param nit_cg: number of CG iterations
:param mse_tol: MSE must be below this value for test to pass.
"""
# Parameters
map_est = False
sparse_rat = 0.1
# Compute the dimensions
ny = nz1
if (ncol==1):
zshape0 = (nz0,)
zshape1 = (nz1,)
yshape = (ny,)
else:
zshape0 = (nz0,ncol)
zshape1 = (nz1,ncol)
yshape = (ny,ncol)
Ashape = (nz1,nz0)
# Generate random input z
#np.random.seed(40)
zpowtgt = 2
zmean0 = 0
zvar0 = zpowtgt/sparse_rat
z0 = np.random.normal(zmean0,np.sqrt(zvar0),zshape0)
u = np.random.uniform(0,1,zshape0) < sparse_rat
z0 = z0*u
zpow = np.mean(z0**2,axis=0)
if (ncol > 1):
zpow = zpow[None,:]
z0 = z0*np.sqrt(zpowtgt/zpow)
# Create a random transform
A = np.random.normal(0,np.sqrt(1/nz0), Ashape)
b = np.random.normal(0,1,zshape1)
    # Linear transform
Az0 = A.dot(z0) + b
wvar = np.power(10,-0.1*snr)*np.mean(np.abs(Az0)**2)
z1 = Az0 + np.random.normal(0,np.sqrt(wvar),yshape)
# Signed output
thresh = 0
y = (z1 > thresh)
# Create estimators for the input and output of the transform
est0_gauss = vp.estim.GaussEst(zmean0,zvar0,zshape0,map_est=map_est)
est0_dis = vp.estim.DiscreteEst(0,1,zshape0)
est_in = vp.estim.MixEst([est0_gauss,est0_dis],[sparse_rat,1-sparse_rat],\
name='Input')
est_out = vp.estim.BinaryQuantEst(y,yshape,thresh=thresh, name='Output')
    # Estimator for the linear transform
Aop = vp.trans.MatrixLT(A,zshape0)
est_lin = vp.estim.LinEstTwo(Aop,b,wvar,est_meth=est_meth,nit_cg=nit_cg,\
name ='Linear')
# List of the estimators
est_list = [est_in,est_lin,est_out]
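    # Added clarifying comment (not in the original source): the three estimators
    # mirror the three stages of the generative model -- est_in appears to encode
    # the sparse Bernoulli-Gaussian prior on z0 (a Gaussian component mixed with a
    # discrete component for the zeros), est_lin the affine map z1 = A*z0 + b with
    # noise variance wvar, and est_out the thresholded measurement y = (z1 > thresh).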
# Create the message handler
damp=1
msg_hdl0 = vp.estim.MsgHdlSimp(map_est=map_est, shape=zshape0,damp=damp)
msg_hdl1 = vp.estim.MsgHdlSimp(map_est=map_est, shape=zshape1,damp=damp)
msg_hdl_list = [msg_hdl0,msg_hdl1]
ztrue = [z0,z1]
solver = vp.solver.mlvamp.MLVamp(est_list,msg_hdl_list,comp_cost=True,\
hist_list=['zhat','zhatvar'])
# Run the solver
solver.solve()
# Get the estimates and predicted variances
zhat_hist = solver.hist_dict['zhat']
zvar_hist = solver.hist_dict['zhatvar']
# Compute per iteration errors
nvar = len(ztrue)
nit2 = len(zhat_hist)
mse_act = np.zeros((nit2,nvar))
mse_pred = np.zeros((nit2,nvar))
for ivar in range(nvar):
zpowi = np.mean(np.abs(ztrue[ivar])**2, axis=0)
for it in range(nit2):
zhati = zhat_hist[it][ivar]
zhatvari = zvar_hist[it][ivar]
mse_act[it,ivar] = debias_mse(zhati,ztrue[ivar])
mse_pred[it,ivar] = 10*np.log10(np.mean(zhatvari/zpowi))
# Check failure
fail = np.any(mse_act[-1,:] > mse_tol)
# Display the final MSE
if verbose or fail:
print("z0 mse: act: {0:7.2f} pred: {1:7.2f}".format(\
mse_act[-1,0],mse_pred[-1,0]))
print("z1 mse: act: {0:7.2f} pred: {1:7.2f}".format(\
mse_act[-1,1],mse_pred[-1,1]))
if plot:
import matplotlib.pyplot as plt
t = np.array(range(nit2))
for ivar in range(nvar):
plt.subplot(1,nvar,ivar+1)
zpow = np.mean(abs(ztrue[ivar])**2)
plt.plot(t, mse_act[:,ivar], 's-')
plt.plot(t, mse_pred[:,ivar], 'o-')
plt.legend(['true','pred'])
if fail:
raise vp.common.VpException("Final MSE higher than expected")
class TestCases(unittest.TestCase):
def test_mlvamp_sparse_probit(self):
"""
Calls the probit estimation test case
"""
#probit_test(ncol=10,est_meth='cg')
probit_test(ncol=10,est_meth='svd',plot=False)
if __name__ == '__main__':
unittest.main()
| mit |
christophreimer/pygeobase | pygeobase/object_base.py | 1 | 4070 | # Copyright (c) 2015, Vienna University of Technology, Department of Geodesy
# and Geoinformation. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology, Department of
# Geodesy and Geoinformation nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pandas as pd
import numpy as np
class TS(object):
"""
The TS class represents the base object of a time series.
"""
def __init__(self, gpi, lon, lat, data, metadata):
"""
Initialization of the time series object.
Parameters
----------
lon : float
Longitude of the time series
lat : float
Latitude of the time series
data : pandas.DataFrame
Pandas DataFrame that holds data for each variable of the time
series
metadata : dict
dictionary that holds metadata
"""
self.gpi = gpi
self.lon = lon
self.lat = lat
self.data = data
self.metadata = metadata
def __repr__(self):
return "Time series gpi:%d lat:%2.3f lon:%3.3f" % (self.gpi,
self.lat,
self.lon)
def plot(self, *args, **kwargs):
"""
        wrapper for pandas.DataFrame.plot which adds a title to the plot
        and drops NaN values before plotting
Returns
-------
ax : axes
matplotlib axes of the plot
"""
tempdata = self.data.dropna(how='all')
ax = tempdata.plot(*args, figsize=(15, 5), **kwargs)
ax.set_title(self.__repr__())
return ax
class Image(object):
"""
The Image class represents the base object of an image.
"""
def __init__(self, lon, lat, data, metadata, timestamp, timekey='jd'):
"""
Initialization of the image object.
Parameters
----------
lon : numpy.array
array of longitudes
lat : numpy.array
array of latitudes
data : dict
dictionary of numpy arrays that holds the image data for each
variable of the dataset
metadata : dict
dictionary that holds metadata
timestamp : datetime.datetime
exact timestamp of the image
timekey : str, optional
Key of the time variable, if available, stored in data dictionary.
"""
self.lon = lon
self.lat = lat
self.data = data
self.metadata = metadata
self.timestamp = timestamp
self.timekey = timekey
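# Minimal usage sketch (added for illustration; not part of the original module).
# The station coordinates, variable name and metadata below are invented and only
# show how a TS container might be filled and plotted; plotting additionally
# assumes matplotlib is installed.
if __name__ == "__main__":
    dates = pd.date_range("2015-01-01", periods=10, freq="D")
    frame = pd.DataFrame({"sm": np.random.rand(10)}, index=dates)
    ts = TS(gpi=1, lon=16.37, lat=48.21, data=frame, metadata={"unit": "m3/m3"})
    print(ts)   # -> Time series gpi:1 lat:48.210 lon:16.370
    ts.plot()   # delegates to pandas.DataFrame.plot with the repr as the title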
| bsd-3-clause |
466152112/scikit-learn | sklearn/svm/tests/test_svm.py | 116 | 31653 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
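# Added clarifying comment (not in the original source): X is a six-point toy set
# whose first three samples (label 1) lie in the lower-left quadrant and whose
# last three (label 2) are their mirror images in the upper-right; T holds three
# held-out points whose expected predictions are listed in true_result.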
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
    # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
    # thread concurrently calling `srand` through this wrapper).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
    # check that SVR(kernel='linear') and LinearSVR() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the decision function shape when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
    # that it is not separable, and we remove half of the samples from
    # one of the classes.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
        # check that the score is better when class_weight='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
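        # Added clarifying comment (not in the original source): the combinations
        # enumerated in the branch below are the ones liblinear does not support
        # (plus the deliberately invalid 'foo'/'bar' values), so fit() must raise
        # ValueError; every other combination is expected to fit cleanly.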
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
jramcast/ml_weather | example8/preprocessing.py | 2 | 3314 | """
Module for preprocessing data before
feeding it into the classfier
"""
import string
import re
from nltk.stem import SnowballStemmer
from nltk.corpus import stopwords
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import MinMaxScaler, Imputer
from sklearn.feature_extraction import DictVectorizer
from textblob import TextBlob
class SentimentExtractor(BaseEstimator, TransformerMixin):
"""
Extracts sentiment features from tweets
"""
def __init__(self):
pass
def transform(self, tweets, y_train=None):
samples = []
for tweet in tweets:
textBlob = TextBlob(tweet)
samples.append({
'sent_polarity': textBlob.sentiment.polarity,
'sent_subjetivity': textBlob.sentiment.subjectivity
})
vectorized = DictVectorizer().fit_transform(samples).toarray()
vectorized = Imputer().fit_transform(vectorized)
vectorized_scaled = MinMaxScaler().fit_transform(vectorized)
return vectorized_scaled
def fit(self, X, y=None):
return self
class TempExtractor(BaseEstimator, TransformerMixin):
"""
Extracts weather temp from tweet
"""
def transform(self, tweets, y_train=None):
tempetures = [[self.get_temperature(tweet)] for tweet in tweets]
vectorized = self.imputer.transform(tempetures)
vectorized_scaled = MinMaxScaler().fit_transform(vectorized)
return vectorized_scaled
def fit(self, tweets, y=None):
self.imputer = Imputer()
tempetures = [[self.get_temperature(tweet)] for tweet in tweets]
self.imputer.fit(tempetures)
return self
def get_temperature(self, tweet):
match = re.search(r'(\d+(\.\d)?)\s*F', tweet, re.IGNORECASE)
if match:
value = float(match.group(1))
celsius = (value - 32) / 1.8
if - 100 < celsius < 100:
return celsius
return None
class WindExtractor(BaseEstimator, TransformerMixin):
"""
Extracts wind from tweet
"""
def transform(self, tweets, y_train=None):
winds = [[self.get_wind(tweet)] for tweet in tweets]
vectorized = self.imputer.transform(winds)
vectorized_scaled = MinMaxScaler().fit_transform(vectorized)
return vectorized_scaled
def fit(self, tweets, y=None):
self.imputer = Imputer()
winds = [[self.get_wind(tweet)] for tweet in tweets]
self.imputer.fit(winds)
return self
def get_wind(self, tweet):
match = re.search(r'(\d+(\.\d)?)\s*mph', tweet, re.IGNORECASE)
if match:
value = float(match.group(1))
kph = value * 1.60934
if 0 <= kph < 500:
return kph
return None
stopwords_list = stopwords.words('english')
def stem_tokens(tokens, stemmer):
stemmer = SnowballStemmer('english')
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
non_words = list(string.punctuation)
non_words.extend(['¿', '¡'])
text = ''.join([c for c in text if c not in non_words])
sentence = TextBlob(text)
tokens = [word.lemmatize() for word in sentence.words]
return tokens
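# Minimal usage sketch (added for illustration; not part of the original module).
# The tweets below are invented; the calls only assume the scikit-learn
# fit/transform convention that these extractor classes already follow.
if __name__ == "__main__":
    sample_tweets = [
        "Sunny afternoon, 75 F with a light 10 mph breeze",
        "Freezing rain tonight, around 30F and gusts up to 25 mph",
    ]
    temps = TempExtractor().fit(sample_tweets).transform(sample_tweets)
    winds = WindExtractor().fit(sample_tweets).transform(sample_tweets)
    # each result is a min-max scaled column vector with one row per tweet
    print(temps)
    print(winds)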
| apache-2.0 |
wasade/American-Gut | tests/test_diversity_analysis.py | 5 | 38742 | #!/usr/bin/env python
from __future__ import division
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import skbio
from os import rmdir
from os.path import realpath, dirname, join as pjoin, exists
from pandas import Series, DataFrame, Index
from pandas.util.testing import assert_index_equal, assert_frame_equal
from americangut.diversity_analysis import (pad_index,
check_dir,
post_hoc_pandas,
multiple_correct_post_hoc,
get_distance_vectors,
segment_colormap,
_get_bar_height,
_get_p_value,
_correct_p_value,
split_taxa,
get_ratio_heatmap)
__author__ = "Justine Debelius"
__copyright__ = "Copyright 2014"
__credits__ = ["Justine Debelius"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "Justine Debelius"
__email__ = "[email protected]"
# Determines the location fo the reference files
TEST_DIR = dirname(realpath(__file__))
class DiversityAnalysisTest(TestCase):
def setUp(self):
# Sets up lists for the data frame
self.ids = ['000001181.5654', '000001096.8485', '000001348.2238',
'000001239.2471', '000001925.5603', '000001098.6354',
'000001577.8059', '000001778.097' , '000001969.1967',
'000001423.7093', '000001180.1049', '000001212.5887',
'000001984.9281', '000001025.9349', '000001464.5884',
'000001800.6787', '000001629.5398', '000001473.443',
'000001351.1149', '000001223.1658', '000001884.0338',
'000001431.6762', '000001436.0807', '000001726.2609',
'000001717.784' , '000001290.9612', '000001806.4843',
'000001490.0658', '000001719.4572', '000001244.6229',
'000001092.3014', '000001315.8661', '000001525.8659',
'000001864.7889', '000001816.9' , '000001916.7858',
'000001261.3164', '000001593.2364', '000001817.3052',
'000001879.8596', '000001509.217' , '000001658.4638',
'000001741.9117', '000001940.457' , '000001620.315' ,
'000001706.6473', '000001287.1914', '000001370.8878',
'000001943.0664', '000001187.2735', '000001065.4497',
'000001598.6903', '000001254.2929', '000001526.143' ,
'000001980.8969', '000001147.6823', '000001745.3174',
'000001978.6417', '000001547.4582', '000001649.7564',
'000001752.3511', '000001231.5535', '000001875.7213',
'000001247.5567', '000001412.7777', '000001364.1045',
'000001124.3191', '000001654.0339', '000001795.4842',
'000001069.8469', '000001149.2945', '000001858.8903',
'000001667.8228', '000001648.5881', '000001775.0501',
'000001023.1689', '000001001.0859', '000001129.0853',
'000001992.9674', '000001174.3727', '000001126.3446',
'000001553.099' , '000001700.7898', '000001345.5369',
'000001821.4033', '000001921.0702', '000001368.0382',
'000001589.0756', '000001428.6135', '000001417.7107',
'000001050.2949', '000001549.0374', '000001169.7575',
'000001827.0751', '000001974.5358', '000001081.3137',
'000001452.7866', '000001194.8171', '000001781.3765',
'000001676.7693', '000001536.9816', '000001123.9341',
'000001950.0472', '000001386.1622', '000001789.8068',
'000001434.209', '000001156.782' , '000001630.8111',
'000001930.9789', '000001136.2997', '000001901.1578',
'000001358.6365', '000001834.4873', '000001175.739' ,
'000001565.3199', '000001532.5022', '000001844.4434',
'000001374.6652', '000001066.9395', '000001277.3526',
'000001765.7054', '000001195.7903', '000001403.1857',
'000001267.8034', '000001463.8063', '000001567.256' ,
'000001986.3291', '000001912.5336', '000001179.8083',
'000001539.4475', '000001702.7498', '000001362.2036',
'000001605.3957', '000001966.5905', '000001690.2717',
'000001796.78' , '000001965.9646', '000001570.6394',
'000001344.0749', '000001505.094' , '000001500.3763',
'000001887.334' , '000001896.9071', '000001061.5473',
'000001210.8434', '000001762.6421', '000001389.9375',
'000001747.7094', '000001275.7608', '000001100.6327',
'000001832.2851', '000001627.4754', '000001811.8183',
'000001202.8991', '000001163.3137', '000001196.7148',
'000001318.8771', '000001155.3022', '000001724.2977',
'000001737.328' , '000001289.1381', '000001480.495',
'000001797.7651', '000001117.9836', '000001108.0792',
'000001060.2191', '000001379.0706', '000001513.9224',
'000001731.9258', '000001563.7487', '000001988.1656',
'000001594.7285', '000001909.1042', '000001920.0818',
'000001999.9644', '000001133.9942', '000001608.1459',
'000001784.159' , '000001543.759' , '000001669.3403',
'000001545.3456', '000001177.5607', '000001387.8614',
'000001086.4642', '000001514.2136', '000001329.4163',
'000001757.7272', '000001574.9939', '000001750.1329',
'000001682.8423', '000001331.238' , '000001330.6685',
'000001556.6615', '000001575.4633', '000001754.591' ,
'000001456.5672', '000001707.2857', '000001164.864' ,
'000001466.7766', '000001383.5692', '000001911.8425',
'000001880.6072', '000001278.4999', '000001671.8068',
'000001301.3063', '000001071.2867', '000001192.7655',
'000001954.0541', '000001041.0466', '000001862.7417',
'000001587.4996', '000001242.6044', '000001040.399' ,
'000001744.3975', '000001189.5132', '000001885.0033',
'000001193.7964', '000001204.533' , '000001279.8583',
'000001488.2298', '000001971.1838', '000001492.0943',
'000001722.285' , '000001947.5481', '000001054.343' ,
'000001227.5756', '000001603.0731', '000001948.0095',
'000001393.6518', '000001661.6287', '000001829.9104',
'000001342.3216', '000001341.7147', '000001994.1765',
'000001400.0325', '000001324.5159', '000001355.789' ,
'000001538.6368', '000001121.0767', '000001377.1835',
'000001831.3158', '000001968.0205', '000001003.7916',
'000001502.0367', '000001729.5203', '000001284.1266',
'000001252.1786', '000001533.2349', '000001198.741' ,
'000001483.1918', '000001528.3996', '000001304.2649',
'000001281.7718', '000001441.8902', '000001203.4813',
'000001657.915' , '000001668.1396', '000001560.6021',
'000001213.1081', '000001894.5208', '000001791.9156',
'000001371.9864', '000001631.1904', '000001635.3301',
'000001541.2899', '000001748.311' , '000001326.0745',
'000001736.2491', '000001028.1898', '000001997.5772',
'000001764.9201', '000001664.4968', '000001031.0638',
'000001457.8448', '000001335.8157', '000001053.361' ,
'000001372.2917', '000001847.3652', '000001746.7838',
'000001173.0655', '000001653.9771', '000001104.8455',
'000001642.548' , '000001866.4881', '000001381.5643',
'000001673.6333', '000001839.2794', '000001855.195' ,
'000001698.1673', '000001813.0695', '000001153.6346',
'000001354.0321', '000001035.5915', '000001469.6652',
'000001422.9333', '000001148.4367', '000001551.0986',
'000001047.9434', '000001160.0422', '000001621.3736']
self.raw_ids = ['1181.5654', '1096.8485', '1348.2238', '1239.2471',
'1925.5603', '1098.6354', '1577.8059', '1778.097',
'1969.1967', '1423.7093', '1180.1049', '1212.5887',
'1984.9281', '1025.9349', '1464.5884', '1800.6787',
'1629.5398', '1473.443', '1351.1149', '1223.1658',
'1884.0338', '1431.6762', '1436.0807', '1726.2609',
'1717.784', '1290.9612', '1806.4843', '1490.0658',
'1719.4572', '1244.6229', '1092.3014', '1315.8661',
'1525.8659', '1864.7889', '1816.9', '1916.7858',
'1261.3164', '1593.2364', '1817.3052', '1879.8596',
'1509.217', '1658.4638', '1741.9117', '1940.457',
'1620.315', '1706.6473', '1287.1914', '1370.8878',
'1943.0664', '1187.2735', '1065.4497', '1598.6903',
'1254.2929', '1526.143', '1980.8969', '1147.6823',
'1745.3174', '1978.6417', '1547.4582', '1649.7564',
'1752.3511', '1231.5535', '1875.7213', '1247.5567',
'1412.7777', '1364.1045', '1124.3191', '1654.0339',
'1795.4842', '1069.8469', '1149.2945', '1858.8903',
'1667.8228', '1648.5881', '1775.0501', '1023.1689',
'1001.0859', '1129.0853', '1992.9674', '1174.3727',
'1126.3446', '1553.099', '1700.7898', '1345.5369',
'1821.4033', '1921.0702', '1368.0382', '1589.0756',
'1428.6135', '1417.7107', '1050.2949', '1549.0374',
'1169.7575', '1827.0751', '1974.5358', '1081.3137',
'1452.7866', '1194.8171', '1781.3765', '1676.7693',
'1536.9816', '1123.9341', '1950.0472', '1386.1622',
'1789.8068', '1434.209', '1156.782', '1630.8111',
'1930.9789', '1136.2997', '1901.1578', '1358.6365',
'1834.4873', '1175.739', '1565.3199', '1532.5022',
'1844.4434', '1374.6652', '1066.9395', '1277.3526',
'1765.7054', '1195.7903', '1403.1857', '1267.8034',
'1463.8063', '1567.256', '1986.3291', '1912.5336',
'1179.8083', '1539.4475', '1702.7498', '1362.2036',
'1605.3957', '1966.5905', '1690.2717', '1796.78',
'1965.9646', '1570.6394', '1344.0749', '1505.094',
'1500.3763', '1887.334', '1896.9071', '1061.5473',
'1210.8434', '1762.6421', '1389.9375', '1747.7094',
'1275.7608', '1100.6327', '1832.2851', '1627.4754',
'1811.8183', '1202.8991', '1163.3137', '1196.7148',
'1318.8771', '1155.3022', '1724.2977', '1737.328',
'1289.1381', '1480.495', '1797.7651', '1117.9836',
'1108.0792', '1060.2191', '1379.0706', '1513.9224',
'1731.9258', '1563.7487', '1988.1656', '1594.7285',
'1909.1042', '1920.0818', '1999.9644', '1133.9942',
'1608.1459', '1784.159', '1543.759', '1669.3403',
'1545.3456', '1177.5607', '1387.8614', '1086.4642',
'1514.2136', '1329.4163', '1757.7272', '1574.9939',
'1750.1329', '1682.8423', '1331.238', '1330.6685',
'1556.6615', '1575.4633', '1754.591', '1456.5672',
'1707.2857', '1164.864', '1466.7766', '1383.5692',
'1911.8425', '1880.6072', '1278.4999', '1671.8068',
'1301.3063', '1071.2867', '1192.7655', '1954.0541',
'1041.0466', '1862.7417', '1587.4996', '1242.6044',
'1040.399', '1744.3975', '1189.5132', '1885.0033',
'1193.7964', '1204.533', '1279.8583', '1488.2298',
'1971.1838', '1492.0943', '1722.285', '1947.5481',
'1054.343', '1227.5756', '1603.0731', '1948.0095',
'1393.6518', '1661.6287', '1829.9104', '1342.3216',
'1341.7147', '1994.1765', '1400.0325', '1324.5159',
'1355.789', '1538.6368', '1121.0767', '1377.1835',
'1831.3158', '1968.0205', '1003.7916', '1502.0367',
'1729.5203', '1284.1266', '1252.1786', '1533.2349',
'1198.741', '1483.1918', '1528.3996', '1304.2649',
'1281.7718', '1441.8902', '1203.4813', '1657.915',
'1668.1396', '1560.6021', '1213.1081', '1894.5208',
'1791.9156', '1371.9864', '1631.1904', '1635.3301',
'1541.2899', '1748.311', '1326.0745', '1736.2491',
'1028.1898', '1997.5772', '1764.9201', '1664.4968',
'1031.0638', '1457.8448', '1335.8157', '1053.361',
'1372.2917', '1847.3652', '1746.7838', '1173.0655',
'1653.9771', '1104.8455', '1642.548', '1866.4881',
'1381.5643', '1673.6333', '1839.2794', '1855.195',
'1698.1673', '1813.0695', '1153.6346', '1354.0321',
'1035.5915', '1469.6652', '1422.9333', '1148.4367',
'1551.0986', '1047.9434', '1160.0422', '1621.3736']
self.website = ['twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit']
self.time = np.array([43.75502506, 32.09982846, 66.44821015,
54.67751100, 74.43663107, 64.91509381,
101.03624273, 42.50120543, 35.92898678,
50.84800153, 46.32394154, 55.82813196,
63.90361272, 77.13825762, 78.76436441,
53.64704526, 64.75223193, 58.39207272,
52.44353642, 60.38707826, 56.51714085,
55.72374379, 59.52585080, 62.99625025,
40.04902494, 89.02585909, 63.23240605,
47.06553888, 73.00190315, 83.80903794,
43.41851989, 25.83410322, 68.21623464,
50.43442676, 49.98389215, 40.24409163,
73.12600309, 59.26529974, 61.66301113,
82.24776146, 69.88472085, 55.33333433,
40.29625976, 68.09510810, 66.85545440,
66.44002527, 72.37790419, 72.81679314,
55.09080142, 48.37538346, 47.60326036,
51.52223083, 56.51417473, 83.04863572,
52.14761947, 81.71073287, 40.88456188,
61.76308339, 75.31540245, 64.41482716,
52.36763551, 64.48863043, 42.46265519,
76.41626766, 73.35103300, 60.13966132,
55.09395578, 72.26945197, 64.14173225,
59.39558958, 54.92166432, 56.15937888,
35.82839971, 80.22338349, 52.03277136,
30.46794613, 58.48158453, 51.08064303,
67.56882508, 64.67001088, 70.31701029,
69.69418892, 45.40860831, 68.72559847,
57.18659048, 79.66512776, 54.12521925,
81.23543425, 79.58214820, 34.09101162,
34.07926981, 53.68661297, 84.73351889,
76.98667389, 83.91038109, 66.35125602,
43.38243470, 60.07458569, 64.01561208,
70.66573983, 193.40761370, 149.46771172,
178.54940784, 146.81737462, 112.67080963,
105.79566831, 169.60015351, 18.16782312,
32.33793705, 161.72043630, 136.65935083,
23.99200240, 124.30215961, 82.66230873,
181.53122374, 96.73843934, 149.75297762,
119.92104479, 29.30535556, 88.98066487,
82.18281694, 99.76251178, 120.62310261,
136.15837651, 140.85019656, 117.06990731,
163.65366512, 214.50717765, 79.72206954,
138.03112015, 144.45114437, 16.41512219,
72.08551518, 85.46372630, 149.13372767,
76.92212059, 109.55645713, 141.65595764,
119.18734692, 51.20662038, 183.75411201,
132.56555213, 101.55378472, 177.69500317,
130.27160521, 143.13166882, 107.23696643,
212.72330518, 130.66925461, 210.11532010,
118.65653641, 77.25638890, 153.29389237,
146.97514023, 0, 105.83704268,
200.05768527, 166.46158871, 135.60586892,
111.06739555, 71.50642636, 21.58216051,
183.15691697, 38.58822892, 38.84706613,
119.36492288, 108.77038019, 88.70541115,
12.61048676, 0, 157.77516036,
43.70631550, 193.87291179, 203.26411137,
179.20054809, 148.37792309, 170.38620220,
102.23651707, 63.46142967, 82.33043919,
258.68968847, 223.94730803, 281.46276889,
350.40078080, 281.53639290, 305.90987647,
286.22932832, 356.53308940, 276.81798226,
305.04298118, 299.13866751, 310.41638501,
347.77589112, 278.37912458, 295.00398672,
292.23076451, 348.14209652, 289.14551826,
288.86118512, 299.21300848, 264.29449774,
353.26294987, 275.68453639, 279.45885854,
287.79470948, 303.34990705, 324.73398364,
337.50702196, 326.59649321, 307.14724645,
300.13203731, 335.28447725, 273.59560986,
315.71949943, 268.86100671, 309.44822617,
357.67123883, 313.70684577, 311.99209985,
277.87145259, 316.89239037, 254.39694340,
300.02140552, 237.21539997, 329.92714491,
318.32432005, 326.65600788, 305.40145477,
326.78894825, 318.92641904, 320.59443395,
308.26919092, 300.00328438, 294.61849344,
284.55947774, 277.63798594, 359.44015820,
292.55982554, 322.71946292, 318.60262991,
307.93128984, 282.51266904, 304.74114309,
285.30356994, 240.53264849, 252.69086070,
289.49431273, 284.68590654, 317.95577632,
288.39433522, 303.55186227, 286.21794163,
281.11550530, 297.15770465, 307.37441274,
290.21885096, 297.39693356, 325.12591032,
340.14615302, 314.10755364, 321.41818630,
302.46825284, 272.60859596, 285.02155314,
260.57728373, 301.01186081, 314.01532677,
301.39435122, 301.53108663, 290.81233377,
331.20632569, 329.26192444, 252.12513671,
294.17604509, 314.25160994, 260.22225619,
296.06068483, 328.70473699, 293.72532762,
323.92449714, 279.36077985, 327.10547840,
332.33552711, 244.70073987, 368.94370441,
288.52914183, 270.96734651, 321.09234466,
395.74872017, 311.64415600, 314.81990465,
319.70690366, 313.96061624, 275.38526052,
338.02460670, 286.98781666, 353.55909038,
306.62353307, 306.92733543, 273.74222557])
        # Creates a data frame object
self.df = DataFrame({'WEBSITE': Series(self.website, index=self.ids),
'DWELL_TIME': Series(self.time, index=self.ids)})
# Creates the distance matrix object
self.ids2 = np.array(['000001181.5654', '000001096.8485',
'000001348.2238', '000001239.2471',
'000001925.5603', '000001148.4367',
'000001551.0986', '000001047.9434',
'000001160.0422', '000001621.3736'])
self.map = self.df.loc[self.ids2]
dist = np.array([[0.000, 0.297, 0.257, 0.405, 0.131, 0.624, 0.934,
0.893, 0.519, 0.904],
[0.297, 0.000, 0.139, 0.130, 0.348, 1.000, 0.796,
1.000, 0.647, 0.756],
[0.257, 0.139, 0.000, 0.384, 0.057, 0.748, 0.599,
0.710, 0.528, 1.000],
[0.405, 0.130, 0.384, 0.000, 0.303, 0.851, 0.570,
0.698, 1.000, 0.638],
[0.131, 0.348, 0.057, 0.303, 0.000, 0.908, 1.000,
0.626, 0.891, 1.000],
[0.624, 1.000, 0.748, 0.851, 0.908, 0.000, 0.264,
0.379, 0.247, 0.385],
[0.934, 0.796, 0.599, 0.570, 1.000, 0.264, 0.000,
0.336, 0.326, 0.530],
[0.893, 1.000, 0.710, 0.698, 0.626, 0.379, 0.336,
0.000, 0.257, 0.450],
[0.519, 0.647, 0.528, 1.000, 0.891, 0.247, 0.326,
0.257, 0.000, 0.492],
[0.904, 0.756, 1.000, 0.638, 1.000, 0.385, 0.530,
0.450, 0.492, 0.000]])
self.dm = skbio.DistanceMatrix(dist, self.ids2)
self.taxa = ['k__Bacteria; p__[Proteobacteria]; '
'c__Gammaproteobacteria; o__; f__; g__; s__',
'k__Bacteria; p__Proteobacteria; '
'c__Gammaproteobacteria; o__Enterobacteriales; '
'f__Enterbacteriaceae; g__Escherichia; s__coli']
self.sub_p = DataFrame(np.array([['ref_group1 vs. ref_group1',
'ref_group1 vs. group1', 0.01],
['ref_group2 vs. group2',
'ref_group2 vs. ref_group2', 0.02],
['group3 vs. ref_group3',
'ref_group3 vs. ref_group3', 0.03],
['ref_group4 vs. ref_group4',
'group4 vs. ref_group4', 0.04]]),
columns=['Group 1', 'Group 2', 'p_value'])
self.sub_p.p_value = self.sub_p.p_value.astype(float)
self.sub_p_lookup = {k: set(self.sub_p[k].values) for k in
('Group 1', 'Group 2')}
def test_pad_index_default(self):
# Creates a data frame with raw ids and no sample column
df = DataFrame({'#SampleID': self.raw_ids,
'WEBSITE': Series(self.website),
'DWELL_TIME': Series(self.time)})
# Pads the raw text
df = pad_index(df)
assert_index_equal(self.df.index, df.index)
def test_pad_index_custom_index(self):
# Creates a data frame with raw ids and no sample column
df = DataFrame({'RawID': self.raw_ids,
'WEBSITE': Series(self.website),
'DWELL_TIME': Series(self.time)})
# Pads the raw text
df = pad_index(df, index_col='RawID')
assert_index_equal(self.df.index, df.index)
def test_pad_index_number(self):
# Creates a data frame with raw ids and no sample column
df = DataFrame({'#SampleID': self.raw_ids,
'WEBSITE': Series(self.website),
'DWELL_TIME': Series(self.time)})
# Pads the raw text
df = pad_index(df, nzeros=4)
assert_index_equal(Index(self.raw_ids), df.index)
def test_check_dir(self):
# Sets up a dummy directory that does not exist
does_not_exist = pjoin(TEST_DIR, 'this_dir_does_not_exist')
# Checks the directory does not currently exist
self.assertFalse(exists(does_not_exist))
# checks the directory
check_dir(does_not_exist)
# Checks the directory exists now
self.assertTrue(exists(does_not_exist))
# Removes the directory
rmdir(does_not_exist)
def test_post_hoc_pandas(self):
known_index = Index(['twitter', 'facebook', 'reddit'],
name='WEBSITE')
known_df = DataFrame(np.array([[100, 60.435757, 60.107124, 14.632637,
np.nan, np.nan],
[80, 116.671135, 119.642984, 54.642403,
7.010498e-14, np.nan],
[120, 302.615690, 301.999670,
28.747101, 2.636073e-37,
5.095701e-33]]),
index=known_index,
columns=['Counts', 'Mean', 'Median', 'Stdv',
'twitter', 'facebook'])
known_df.Counts = known_df.Counts.astype('int64')
test_df = post_hoc_pandas(self.df, 'WEBSITE', 'DWELL_TIME')
assert_frame_equal(known_df, test_df)
def test_multiple_correct_post_hoc(self):
known_df = DataFrame(np.array([[np.nan, 4e-2, 1e-3],
[4e-4, np.nan, 1e-6],
[4e-7, 4e-8, np.nan]]),
columns=[0, 1, 2])
raw_ph = DataFrame(np.power(10, -np.array([[np.nan, 2, 3],
[4, np.nan, 6],
[7, 8, np.nan]])),
columns=[0, 1, 2])
order = np.arange(0, 3)
test_df = multiple_correct_post_hoc(raw_ph, order, 'fdr_bh')
assert_frame_equal(known_df, test_df)
    def test_segmented_colormap(self):
known_cmap = np.array([[0.88207613, 0.95386390, 0.69785469, 1.],
[0.59215687, 0.84052289, 0.72418302, 1.],
[0.25268744, 0.71144946, 0.76838141, 1.],
[0.12026144, 0.50196080, 0.72156864, 1.],
[0.14136102, 0.25623991, 0.60530568, 1.]])
test_cmap = segment_colormap('YlGnBu', 5)
npt.assert_almost_equal(test_cmap, known_cmap, 5)
def test_get_bar_height(self):
test_lowest, test_fudge = \
_get_bar_height(np.array([0.01, 0.02, 0.3, 0.52]))
npt.assert_almost_equal(test_lowest, 0.55, 3)
self.assertEqual(test_fudge, 10)
def test_get_bar_height_fudge(self):
test_lowest, test_fudge = \
_get_bar_height(np.array([0.01, 0.02, 0.3, 0.52]), factor=3)
self.assertEqual(test_lowest, 0.54)
self.assertEqual(test_fudge, 10)
def test_get_p_value(self):
self.assertEqual(_get_p_value(self.sub_p, self.sub_p_lookup,
'ref_group1', 'group1', 'p_value'), 0.01)
self.assertEqual(_get_p_value(self.sub_p, self.sub_p_lookup,
'ref_group2', 'group2', 'p_value'), 0.02)
self.assertEqual(_get_p_value(self.sub_p, self.sub_p_lookup,
'ref_group3', 'group3', 'p_value'), 0.03)
self.assertEqual(_get_p_value(self.sub_p, self.sub_p_lookup,
'ref_group4', 'group4', 'p_value'), 0.04)
def test_get_p_value_error(self):
with self.assertRaises(ValueError):
_get_p_value(self.sub_p, self.sub_p_lookup, 'ref_group',
'group', 'p_value')
def test_correct_p_value_no_tail(self):
p_value = 0.05
tail = False
self.assertEqual(_correct_p_value(tail, p_value, 1, 1), p_value)
def test_correct_p_value_no_greater_ref(self):
p_value = 0.05
tail = True
self.assertEqual(_correct_p_value(tail, p_value, 2, 1), 1)
def test_correct_p_value_no_less_ref(self):
p_value = 0.05
tail = True
self.assertEqual(_correct_p_value(tail, p_value, 1, 2), p_value)
def test_get_distance_vectors(self):
known_within = {'twitter': np.array([0.297, 0.257, 0.405, 0.131,
0.139, 0.130, 0.348, 0.384,
0.057, 0.303]),
'reddit': np.array([0.264, 0.379, 0.247, 0.385, 0.336,
0.326, 0.530, 0.257, 0.450,
0.492])}
known_between = {('twitter', 'reddit'): np.array([0.624, 0.934, 0.893,
0.519, 0.904, 1.000,
0.796, 1.000, 0.647,
0.756, 0.748, 0.599,
0.710, 0.528, 1.000,
0.851, 0.570, 0.698,
1.000, 0.638, 0.908,
1.000, 0.626, 0.891,
1.000])}
test_within, test_between = \
get_distance_vectors(dm=self.dm,
df=self.map,
group='WEBSITE',
order=['twitter', 'reddit'])
# Tests the results
self.assertEqual(known_within.keys(), test_within.keys())
self.assertEqual(known_between.keys(), test_between.keys())
for k, a in test_within.iteritems():
npt.assert_array_equal(known_within[k], a)
for k, a in test_between.iteritems():
npt.assert_array_equal(known_between[k], a)
def test_split_taxa_error(self):
with self.assertRaises(ValueError):
split_taxa(['k__Bacteria; p__[Proteobacteria]; '
'c__Gammaproteobacteria'], 7)
def test_split_taxa(self):
known_taxa = np.array([['Bacteria', 'cont. Proteobacteria',
'Gammaproteobacteria',
'c. Gammaproteobacteria',
'c. Gammaproteobacteria',
'c. Gammaproteobacteria',
'c. Gammaproteobacteria'],
['Bacteria', 'Proteobacteria',
'Gammaproteobacteria', 'Enterobacteriales',
'Enterbacteriaceae', 'Escherichia', 'coli']],
dtype='|S32')
known_levels = ['kingdom', 'phylum', 'p_class', 'p_order', 'family',
'genus', 'species']
test_taxa, test_levels = split_taxa(self.taxa, 7)
self.assertEqual(known_levels, test_levels)
npt.assert_array_equal(known_taxa, test_taxa)
def test_get_ratio_heatmap(self):
data = np.array([[1, 2, 3, 4],
[2, 4, 6, 8],
[3, 6, 9, 12],
[4, 8, 12, 16]])
known = np.array([[0.4, 0.8, 1.2, 1.6],
[0.4, 0.8, 1.2, 1.6],
[0.4, 0.8, 1.2, 1.6],
[0.4, 0.8, 1.2, 1.6]])
test = get_ratio_heatmap(data)
npt.assert_array_equal(test, known)
def test_get_ratio_heatmap_log(self):
data = np.array([[2, 4, 8, 16],
[1, 4, 16, 256]])
known = np.array([[0, 1, 2, 3],
[0, 2, 4, 8]])
test = get_ratio_heatmap(data, ref_pos=0, log=2)
npt.assert_array_equal(test, known)
if __name__ == '__main__':
main()
| bsd-3-clause |
oaastest/Azure-MachineLearning-ClientLibrary-Python | azureml/serialization.py | 4 | 5330 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#--------------------------------------------------------------------------
from functools import partial
import codecs
import pandas as pd
from azureml.errors import (
UnsupportedDatasetTypeError,
_not_none,
_not_none_or_empty,
)
class DataTypeIds(object):
"""Constants for the known dataset data type id strings."""
ARFF = 'ARFF'
PlainText = 'PlainText'
GenericCSV = 'GenericCSV'
GenericTSV = 'GenericTSV'
GenericCSVNoHeader = 'GenericCSVNoHeader'
GenericTSVNoHeader = 'GenericTSVNoHeader'
def _dataframe_to_csv(writer, dataframe, delimiter, with_header):
"""serialize the dataframe with different delimiters"""
encoding_writer = codecs.getwriter('utf-8')(writer)
dataframe.to_csv(
path_or_buf=encoding_writer,
sep=delimiter,
header=with_header,
index=False
)
def _dataframe_to_txt(writer, dataframe):
encoding_writer = codecs.getwriter('utf-8')(writer)
for row in dataframe.iterrows():
encoding_writer.write("".join(row[1].tolist()))
encoding_writer.write('\n')
def _dataframe_from_csv(reader, delimiter, with_header, skipspace):
"""Returns csv data as a pandas Dataframe object"""
sep = delimiter
header = 0
if not with_header:
header = None
return pd.read_csv(
reader,
header=header,
sep=sep,
skipinitialspace=skipspace,
encoding='utf-8-sig'
)
def _dataframe_from_txt(reader):
"""Returns PlainText data as a pandas Dataframe object"""
return pd.read_csv(reader, header=None, sep="\n", encoding='utf-8-sig')
_SERIALIZERS = {
DataTypeIds.PlainText: (
_dataframe_to_txt,
_dataframe_from_txt,
),
DataTypeIds.GenericCSV: (
partial(_dataframe_to_csv, delimiter=',', with_header=True),
partial(_dataframe_from_csv, delimiter=',', with_header=True, skipspace=True),
),
DataTypeIds.GenericCSVNoHeader: (
partial(_dataframe_to_csv, delimiter=',', with_header=False),
partial(_dataframe_from_csv, delimiter=',', with_header=False, skipspace=True),
),
DataTypeIds.GenericTSV: (
partial(_dataframe_to_csv, delimiter='\t', with_header=True),
partial(_dataframe_from_csv, delimiter='\t', with_header=True, skipspace=False),
),
DataTypeIds.GenericTSVNoHeader: (
partial(_dataframe_to_csv, delimiter='\t', with_header=False),
partial(_dataframe_from_csv, delimiter='\t', with_header=False, skipspace=False),
),
}
def serialize_dataframe(writer, data_type_id, dataframe):
"""
Serialize a dataframe.
Parameters
----------
writer : file
File-like object to write to. Must be opened in binary mode.
    data_type_id : str
Serialization format to use.
See the azureml.DataTypeIds class for constants.
dataframe: pandas.DataFrame
Dataframe to serialize.
"""
_not_none('writer', writer)
_not_none_or_empty('data_type_id', data_type_id)
_not_none('dataframe', dataframe)
serializer = _SERIALIZERS.get(data_type_id)
if serializer is None:
raise UnsupportedDatasetTypeError(data_type_id)
serializer[0](writer=writer, dataframe=dataframe)
def deserialize_dataframe(reader, data_type_id):
"""
Deserialize a dataframe.
Parameters
----------
reader : file
File-like object to read from. Must be opened in binary mode.
    data_type_id : str
Serialization format of the raw data.
See the azureml.DataTypeIds class for constants.
Returns
-------
pandas.DataFrame
Dataframe object.
"""
_not_none('reader', reader)
_not_none_or_empty('data_type_id', data_type_id)
serializer = _SERIALIZERS.get(data_type_id)
if serializer is None:
raise UnsupportedDatasetTypeError(data_type_id)
return serializer[1](reader=reader)
def is_supported(data_type_id):
"""Return if a serializer is available for the specified format."""
_not_none_or_empty('data_type_id', data_type_id)
return _SERIALIZERS.get(data_type_id) is not None
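# --- Illustrative sketch (added for documentation; not part of the API) --------
# A minimal round trip through serialize_dataframe/deserialize_dataframe using an
# in-memory binary buffer. The example DataFrame contents are made up.
def _example_round_trip():
    import io
    frame = pd.DataFrame({'label': ['a', 'b'], 'value': [1, 2]})
    buffer = io.BytesIO()
    serialize_dataframe(buffer, DataTypeIds.GenericCSV, frame)
    buffer.seek(0)
    return deserialize_dataframe(buffer, DataTypeIds.GenericCSV)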
| mit |
potash/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, the
    loader chooses between integer id lookup and metadata name lookup by
    looking at the unzipped archives and their metadata files.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
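# --- Illustrative sketch (added; not part of scikit-learn) ---------------------
# Hypothetical usage, assuming MLCOMP_DATASETS_HOME points at a folder that holds
# the unzipped "20news-18828" MLComp dataset; the dataset name is an assumption.
def _example_load_mlcomp():
    news_train = load_mlcomp('20news-18828', 'train')
    return news_train.filenames, news_train.target, news_train.target_names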
| bsd-3-clause |
sunshineDrizzle/FreeROI | froi/algorithm/meshtool.py | 2 | 36643 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import logging
import subprocess
import numpy as np
import matplotlib as mpl
from matplotlib import cm
from networkx import Graph
from scipy import sparse
from scipy.spatial.distance import cdist, pdist
from scipy.stats import pearsonr

# Module-level logger used by smoothing_matrix, ffmpeg and other helpers below
logger = logging.getLogger(__name__)
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
def compute_normals(rr, tris):
"""Efficiently compute vertex normals for triangulated surface"""
# first, compute triangle normals
r1 = rr[tris[:, 0], :]
r2 = rr[tris[:, 1], :]
r3 = rr[tris[:, 2], :]
tri_nn = _fast_cross_3d((r2 - r1), (r3 - r1))
# Triangle normals and areas
size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
zidx = np.where(size == 0)[0]
# prevent ugly divide-by-zero
size[zidx] = 1.0
tri_nn /= size[:, np.newaxis]
npts = len(rr)
# the following code replaces this, but is faster (vectorized):
#
# for p, verts in enumerate(tris):
# nn[verts, :] += tri_nn[p, :]
#
nn = np.zeros((npts, 3))
# note this only loops 3x (number of verts per tri)
for verts in tris.T:
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts, tri_nn[:, idx], minlength=npts)
size = np.sqrt(np.sum(nn * nn, axis=1))
# prevent ugly divide-by-zero
size[size == 0] = 1.0
nn /= size[:, np.newaxis]
return nn
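# --- Illustrative sketch (added; hypothetical helper, not original API) --------
# For a single triangle lying in the z = 0 plane, every vertex normal computed by
# compute_normals is the unit vector along +z with this winding order.
def _demo_compute_normals():
    rr = np.array([[0., 0., 0.],
                   [1., 0., 0.],
                   [0., 1., 0.]])
    tris = np.array([[0, 1, 2]])
    return compute_normals(rr, tris)  # each row is approximately [0, 0, 1]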
def find_closest_vertices(surface_coords, point_coords):
"""Return the vertices on a surface mesh closest to some
given coordinates.
The distance metric used is Euclidian distance.
Parameters
----------
surface_coords : numpy array
Array of coordinates on a surface mesh
point_coords : numpy array
Array of coordinates to map to vertices
Returns
-------
closest_vertices : numpy array
Array of mesh vertex ids
"""
point_coords = np.atleast_2d(point_coords)
return np.argmin(cdist(surface_coords, point_coords), axis=0)
def tal_to_mni(coords):
"""Convert Talairach coords to MNI using the Lancaster transform.
Parameters
----------
coords : n x 3 numpy array
Array of Talairach coordinates
Returns
-------
mni_coords : n x 3 numpy array
Array of coordinates converted to MNI space
"""
coords = np.atleast_2d(coords)
xfm = np.array([[1.06860, -0.00396, 0.00826, 1.07816],
[0.00640, 1.05741, 0.08566, 1.16824],
[-0.01281, -0.08863, 1.10792, -4.17805],
[0.00000, 0.00000, 0.00000, 1.00000]])
mni_coords = np.dot(np.c_[coords, np.ones(coords.shape[0])], xfm.T)[:, :3]
return mni_coords
def mesh_edges(faces):
"""
Returns sparse matrix with edges as an adjacency matrix
Parameters
----------
faces : array of shape [n_triangles x 3]
The mesh faces
Returns
-------
edges : sparse matrix
The adjacency matrix
"""
npoints = np.max(faces) + 1
nfaces = len(faces)
a, b, c = faces.T
edges = sparse.coo_matrix((np.ones(nfaces), (a, b)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (b, c)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (c, a)),
shape=(npoints, npoints))
edges = edges + edges.T
edges = edges.tocoo()
return edges
def create_color_lut(cmap, n_colors=256):
"""Return a colormap suitable for setting as a Mayavi LUT.
Parameters
----------
cmap : string, list of colors, n x 3 or n x 4 array
Input colormap definition. This can be the name of a matplotlib
colormap, a list of valid matplotlib colors, or a suitable
mayavi LUT (possibly missing the alpha channel).
n_colors : int, optional
Number of colors in the resulting LUT. This is ignored if cmap
is a 2d array.
Returns
-------
lut : n_colors x 4 integer array
Color LUT suitable for passing to mayavi
"""
if isinstance(cmap, np.ndarray):
if np.ndim(cmap) == 2:
if cmap.shape[1] == 4:
                # This looks like a LUT that's ready to go
lut = cmap.astype(np.int)
elif cmap.shape[1] == 3:
# This looks like a LUT, but it's missing the alpha channel
alpha = np.ones(len(cmap), np.int) * 255
lut = np.c_[cmap, alpha]
return lut
# Otherwise, we're going to try and use matplotlib to create it
if cmap in dir(cm):
# This is probably a matplotlib colormap, so build from that
# The matplotlib colormaps are a superset of the mayavi colormaps
# except for one or two cases (i.e. blue-red, which is a crappy
# rainbow colormap and shouldn't be used for anything, although in
# its defense it's better than "Jet")
cmap = getattr(cm, cmap)
elif np.iterable(cmap):
# This looks like a list of colors? Let's try that.
colors = list(map(mpl.colors.colorConverter.to_rgb, cmap))
cmap = mpl.colors.LinearSegmentedColormap.from_list("_", colors)
else:
# If we get here, it's a bad input
raise ValueError("Input %s was not valid for making a lut" % cmap)
# Convert from a matplotlib colormap to a lut array
lut = (cmap(np.linspace(0, 1, n_colors)) * 255).astype(np.int)
return lut
def smoothing_matrix(vertices, adj_mat, smoothing_steps=20, verbose=None):
"""Create a smoothing matrix which can be used to interpolate data defined
    for a subset of vertices onto a mesh with an adjacency matrix given by
adj_mat.
    If smoothing_steps is None, smoothing steps are applied repeatedly until
    the whole mesh is filled with non-zeros. Only use this option if
the vertices correspond to a subsampled version of the mesh.
Parameters
----------
vertices : 1d array
vertex indices
adj_mat : sparse matrix
N x N adjacency matrix of the full mesh
smoothing_steps : int or None
number of smoothing steps (Default: 20)
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
Returns
-------
smooth_mat : sparse matrix
smoothing matrix with size N x len(vertices)
"""
from scipy import sparse
logger.info("Updating smoothing matrix, be patient..")
e = adj_mat.copy()
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = vertices
smooth_mat = 1.0
n_iter = smoothing_steps if smoothing_steps is not None else 1000
for k in range(n_iter):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
scale_mat = sparse.dia_matrix((1 / data1[idx_use], 0),
shape=(len(idx_use), len(idx_use)))
smooth_mat = scale_mat * e_use[idx_use, :] * smooth_mat
logger.info("Smoothing matrix creation, step %d" % (k + 1))
if smoothing_steps is None and len(idx_use) >= n_vertices:
break
# Make sure the smoothing matrix has the right number of rows
# and is in COO format
smooth_mat = smooth_mat.tocoo()
smooth_mat = sparse.coo_matrix((smooth_mat.data,
(idx_use[smooth_mat.row],
smooth_mat.col)),
shape=(n_vertices,
len(vertices)))
return smooth_mat
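# --- Illustrative sketch (added; hypothetical helper, not original API) --------
# Interpolate data known only on vertices 0 and 3 of a two-triangle mesh onto all
# four vertices by multiplying with the smoothing matrix.
def _demo_smoothing_matrix():
    faces = np.array([[0, 1, 2], [1, 2, 3]])
    adj_mat = mesh_edges(faces)
    smooth = smoothing_matrix(np.array([0, 3]), adj_mat, smoothing_steps=2)
    known_values = np.array([1.0, 2.0])  # values on vertices 0 and 3
    return smooth * known_values         # one interpolated value per vertex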
def coord_to_label(subject_id, coord, label, hemi='lh', n_steps=30,
map_surface='white', coord_as_vert=False, verbose=None):
"""Create label from MNI coordinate
Parameters
----------
subject_id : string
Use if file is in register with subject's orig.mgz
coord : numpy array of size 3 | int
One coordinate in MNI space or the vertex index.
label : str
Label name
hemi : [lh, rh]
Hemisphere target
n_steps : int
Number of dilation iterations
map_surface : str
The surface name used to find the closest point
coord_as_vert : bool
whether the coords parameter should be interpreted as vertex ids
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
"""
geo = Surface(subject_id, hemi, map_surface)
geo.load_geometry()
if coord_as_vert:
coord = geo.coords[coord]
n_vertices = len(geo.coords)
adj_mat = mesh_edges(geo.faces)
foci_vtxs = find_closest_vertices(geo.coords, [coord])
data = np.zeros(n_vertices)
data[foci_vtxs] = 1.
smooth_mat = smoothing_matrix(np.arange(n_vertices), adj_mat, 1)
for _ in range(n_steps):
data = smooth_mat * data
idx = np.where(data.ravel() > 0)[0]
# Write label
label_fname = label + '-' + hemi + '.label'
logger.info("Saving label : %s" % label_fname)
f = open(label_fname, 'w')
f.write('#label at %s from subject %s\n' % (coord, subject_id))
f.write('%d\n' % len(idx))
for i in idx:
x, y, z = geo.coords[i]
f.write('%d %f %f %f 0.000000\n' % (i, x, y, z))
def _get_subjects_dir(subjects_dir=None, raise_error=True):
"""Get the subjects directory from parameter or environment variable
Parameters
----------
subjects_dir : str | None
The subjects directory.
raise_error : bool
If True, raise a ValueError if no value for SUBJECTS_DIR can be found
or the corresponding directory does not exist.
Returns
-------
subjects_dir : str
The subjects directory. If the subjects_dir input parameter is not
None, its value will be returned, otherwise it will be obtained from
the SUBJECTS_DIR environment variable.
"""
if subjects_dir is None:
subjects_dir = os.environ.get("SUBJECTS_DIR", "")
if not subjects_dir and raise_error:
raise ValueError('The subjects directory has to be specified '
'using the subjects_dir parameter or the '
'SUBJECTS_DIR environment variable.')
if raise_error and not os.path.exists(subjects_dir):
raise ValueError('The subjects directory %s does not exist.'
% subjects_dir)
return subjects_dir
def has_fsaverage(subjects_dir=None):
"""Determine whether the user has a usable fsaverage"""
fs_dir = os.path.join(_get_subjects_dir(subjects_dir, False), 'fsaverage')
if not os.path.isdir(fs_dir):
return False
if not os.path.isdir(os.path.join(fs_dir, 'surf')):
return False
return True
requires_fsaverage = np.testing.dec.skipif(not has_fsaverage(),
'Requires fsaverage subject data')
# --- check ffmpeg
def has_ffmpeg():
"""Test whether the FFmpeg is available in a subprocess
Returns
-------
ffmpeg_exists : bool
True if FFmpeg can be successfully called, False otherwise.
"""
try:
subprocess.call(["ffmpeg"], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except OSError:
return False
def assert_ffmpeg_is_available():
"Raise a RuntimeError if FFmpeg is not in the PATH"
if not has_ffmpeg():
err = ("FFmpeg is not in the path and is needed for saving "
"movies. Install FFmpeg and try again. It can be "
"downlaoded from http://ffmpeg.org/download.html.")
raise RuntimeError(err)
requires_ffmpeg = np.testing.dec.skipif(not has_ffmpeg(), 'Requires FFmpeg')
def ffmpeg(dst, frame_path, framerate=24, codec='mpeg4', bitrate='1M'):
"""Run FFmpeg in a subprocess to convert an image sequence into a movie
Parameters
----------
dst : str
Destination path. If the extension is not ".mov" or ".avi", ".mov" is
added. If the file already exists it is overwritten.
frame_path : str
Path to the source frames (with a frame number field like '%04d').
framerate : float
Framerate of the movie (frames per second, default 24).
codec : str | None
Codec to use (default 'mpeg4'). If None, the codec argument is not
forwarded to ffmpeg, which preserves compatibility with very old
versions of ffmpeg
bitrate : str | float
Bitrate to use to encode movie. Can be specified as number (e.g.
64000) or string (e.g. '64k'). Default value is 1M
Notes
-----
    Requires FFmpeg to be in the path. FFmpeg can be downloaded from `here
<http://ffmpeg.org/download.html>`_. Stdout and stderr are written to the
logger. If the movie file is not created, a RuntimeError is raised.
"""
assert_ffmpeg_is_available()
# find target path
dst = os.path.expanduser(dst)
dst = os.path.abspath(dst)
root, ext = os.path.splitext(dst)
dirname = os.path.dirname(dst)
if ext not in ['.mov', '.avi']:
dst += '.mov'
if os.path.exists(dst):
os.remove(dst)
elif not os.path.exists(dirname):
os.mkdir(dirname)
frame_dir, frame_fmt = os.path.split(frame_path)
# make the movie
cmd = ['ffmpeg', '-i', frame_fmt, '-r', str(framerate),
'-b:v', str(bitrate)]
if codec is not None:
cmd += ['-c', codec]
cmd += [dst]
logger.info("Running FFmpeg with command: %s", ' '.join(cmd))
sp = subprocess.Popen(cmd, cwd=frame_dir, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# log stdout and stderr
stdout, stderr = sp.communicate()
std_info = os.linesep.join(("FFmpeg stdout", '=' * 25, stdout))
logger.info(std_info)
if stderr.strip():
err_info = os.linesep.join(("FFmpeg stderr", '=' * 27, stderr))
# FFmpeg prints to stderr in the absence of an error
logger.info(err_info)
# check that movie file is created
if not os.path.exists(dst):
err = ("FFmpeg failed, no file created; see log for more more "
"information.")
raise RuntimeError(err)
def get_n_ring_neighbor(faces, n=1, ordinal=False, mask=None):
"""
get n ring neighbor from faces array
Parameters
----------
faces : numpy array
the array of shape [n_triangles, 3]
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is
non-ROI element's value is zero
Returns
-------
lists
each index of the list represents a vertex number
each element is a set which includes neighbors of corresponding vertex
"""
n_vtx = np.max(faces) + 1 # get the number of vertices
if mask is not None and np.nonzero(mask)[0].shape[0] == n_vtx:
        # In this case, the mask covers all vertices, which is equivalent to having no mask (None).
        # Resetting it to None here saves computation below.
mask = None
# find 1_ring neighbors' id for each vertex
coo_w = mesh_edges(faces)
csr_w = coo_w.tocsr()
if mask is None:
vtx_iter = range(n_vtx)
n_ring_neighbors = [csr_w.indices[csr_w.indptr[i]:csr_w.indptr[i+1]] for i in vtx_iter]
n_ring_neighbors = [set(i) for i in n_ring_neighbors]
else:
mask_id = np.nonzero(mask)[0]
vtx_iter = mask_id
n_ring_neighbors = [set(csr_w.indices[csr_w.indptr[i]:csr_w.indptr[i+1]])
if mask[i] != 0 else set() for i in range(n_vtx)]
for vtx in vtx_iter:
neighbor_set = n_ring_neighbors[vtx]
neighbor_iter = list(neighbor_set)
for i in neighbor_iter:
if mask[i] == 0:
neighbor_set.discard(i)
if n > 1:
# find n_ring neighbors
one_ring_neighbors = [i.copy() for i in n_ring_neighbors]
n_th_ring_neighbors = [i.copy() for i in n_ring_neighbors]
# if n>1, go to get more neighbors
for i in range(n-1):
for neighbor_set in n_th_ring_neighbors:
neighbor_set_tmp = neighbor_set.copy()
for v_id in neighbor_set_tmp:
neighbor_set.update(one_ring_neighbors[v_id])
if i == 0:
for v_id in vtx_iter:
n_th_ring_neighbors[v_id].remove(v_id)
for v_id in vtx_iter:
n_th_ring_neighbors[v_id] -= n_ring_neighbors[v_id] # get the (i+2)_th ring neighbors
n_ring_neighbors[v_id] |= n_th_ring_neighbors[v_id] # get the (i+2) ring neighbors
elif n == 1:
n_th_ring_neighbors = n_ring_neighbors
else:
raise RuntimeError("The number of rings should be equal or greater than 1!")
if ordinal:
return n_th_ring_neighbors
else:
return n_ring_neighbors
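# --- Illustrative sketch (added; hypothetical helper, not original API) --------
# Two triangles sharing the edge (1, 2): within one ring, vertex 0 touches
# {1, 2}; within two rings it also reaches vertex 3 on the far triangle.
def _demo_get_n_ring_neighbor():
    faces = np.array([[0, 1, 2], [1, 2, 3]])
    one_ring = get_n_ring_neighbor(faces, n=1)
    two_ring = get_n_ring_neighbor(faces, n=2)
    assert one_ring[0] == {1, 2}
    assert two_ring[0] == {1, 2, 3}
    return one_ring, two_ring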
def get_vtx_neighbor(vtx, faces, n=1, ordinal=False, mask=None):
"""
Get one vertex's n-ring neighbor vertices
Parameters
----------
vtx : integer
a vertex's id
faces : numpy array
the array of shape [n_triangles, 3]
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is.
Return
------
neighbors : set
contain neighbors of the vtx
"""
n_ring_neighbors = _get_vtx_neighbor(vtx, faces, mask)
n_th_ring_neighbors = n_ring_neighbors.copy()
for i in range(n-1):
neighbors_tmp = set()
for neighbor in n_th_ring_neighbors:
neighbors_tmp.update(_get_vtx_neighbor(neighbor, faces, mask))
if i == 0:
neighbors_tmp.discard(vtx)
n_th_ring_neighbors = neighbors_tmp.difference(n_ring_neighbors)
n_ring_neighbors.update(n_th_ring_neighbors)
if ordinal:
return n_th_ring_neighbors
else:
return n_ring_neighbors
def _get_vtx_neighbor(vtx, faces, mask=None):
"""
Get one vertex's 1-ring neighbor vertices
Parameters
----------
vtx : integer
a vertex's id
faces : numpy array
the array of shape [n_triangles, 3]
mask : 1-D numpy array
        specify an area where the ROI is.
Return
------
neighbors : set
contain neighbors of the vtx
"""
row_indices, _ = np.where(faces == vtx)
neighbors = set(np.unique(faces[row_indices]))
neighbors.discard(vtx)
if mask is not None:
neighbor_iter = list(neighbors)
for i in neighbor_iter:
if mask[i] == 0:
neighbors.discard(i)
return neighbors
def mesh2edge_list(faces, n=1, ordinal=False, mask=None, vtx_signal=None,
weight_type=('dissimilar', 'euclidean'), weight_normalization=False):
"""
    get edge_list according to the mesh's geometry and vtx_signal
    The edge_list can be used to create a graph or an adjacency matrix
Parameters
----------
faces : a array with shape (n_triangles, 3)
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is
non-ROI element's value is zero
vtx_signal : numpy array
NxM array, N is the number of vertices,
M is the number of measurements and time points.
weight_type : (str1, str2)
The rule used for calculating weights
such as ('dissimilar', 'euclidean') and ('similar', 'pearson correlation')
weight_normalization : bool
If it is False, do nothing.
If it is True, normalize weights to [0, 1].
        After doing this, the greater the weight is, the more related the two vertices of the edge are.
Returns
-------
row_ind : list
row indices of edges
col_ind : list
column indices of edges
edge_data : list
edge data of the edges-zip(row_ind, col_ind)
"""
n_ring_neighbors = get_n_ring_neighbor(faces, n, ordinal, mask)
row_ind = [i for i, neighbors in enumerate(n_ring_neighbors) for v_id in neighbors]
col_ind = [v_id for neighbors in n_ring_neighbors for v_id in neighbors]
if vtx_signal is None:
# create unweighted edges
n_edge = len(row_ind) # the number of edges
edge_data = np.ones(n_edge)
else:
# calculate weights according to mesh's geometry and vertices' signal
if weight_type[0] == 'dissimilar':
if weight_type[1] == 'euclidean':
edge_data = [pdist(vtx_signal[[i, j]], metric=weight_type[1])[0]
for i, j in zip(row_ind, col_ind)]
elif weight_type[1] == 'relative_euclidean':
edge_data = []
for i, j in zip(row_ind, col_ind):
euclidean = pdist(vtx_signal[[i, j]], metric='euclidean')[0]
sum_ij = np.sum(abs(vtx_signal[[i, j]]))
if sum_ij:
edge_data.append(float(euclidean) / sum_ij)
else:
edge_data.append(0)
else:
raise RuntimeError("The weight_type-{} is not supported now!".format(weight_type))
if weight_normalization:
max_dissimilar = np.max(edge_data)
min_dissimilar = np.min(edge_data)
edge_data = [(max_dissimilar-dist)/(max_dissimilar-min_dissimilar) for dist in edge_data]
elif weight_type[0] == 'similar':
if weight_type[1] == 'pearson correlation':
edge_data = [pearsonr(vtx_signal[i], vtx_signal[j])[0] for i, j in zip(row_ind, col_ind)]
elif weight_type[1] == 'mean':
edge_data = [np.mean(vtx_signal[[i, j]]) for i, j in zip(row_ind, col_ind)]
else:
raise RuntimeError("The weight_type-{} is not supported now!".format(weight_type))
if weight_normalization:
max_similar = np.max(edge_data)
min_similar = np.min(edge_data)
edge_data = [(simi-min_similar)/(max_similar-min_similar) for simi in edge_data]
else:
raise TypeError("The weight_type-{} is not supported now!".format(weight_type))
return row_ind, col_ind, edge_data
def mesh2adjacent_matrix(faces, n=1, ordinal=False, mask=None, vtx_signal=None,
weight_type=('dissimilar', 'euclidean'), weight_normalization=False):
"""
    get the adjacency matrix according to the mesh's geometry and vtx_signal
Parameters
----------
faces : a array with shape (n_triangles, 3)
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is
non-ROI element's value is zero
vtx_signal : numpy array
NxM array, N is the number of vertices,
M is the number of measurements and time points.
weight_type : (str1, str2)
The rule used for calculating weights
such as ('dissimilar', 'euclidean') and ('similar', 'pearson correlation')
weight_normalization : bool
If it is False, do nothing.
If it is True, normalize weights to [0, 1].
        After doing this, the greater the weight is, the more related the two vertices of the edge are.
Returns
-------
adjacent_matrix : coo matrix
"""
n_vtx = np.max(faces) + 1
row_ind, col_ind, edge_data = mesh2edge_list(faces, n, ordinal, mask, vtx_signal,
weight_type, weight_normalization)
adjacent_matrix = sparse.coo_matrix((edge_data, (row_ind, col_ind)), (n_vtx, n_vtx))
return adjacent_matrix
def mesh2graph(faces, n=1, ordinal=False, mask=None, vtx_signal=None,
weight_type=('dissimilar', 'euclidean'), weight_normalization=True):
"""
create graph according to mesh's geometry and vtx_signal
Parameters
----------
faces : a array with shape (n_triangles, 3)
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is
non-ROI element's value is zero
vtx_signal : numpy array
NxM array, N is the number of vertices,
M is the number of measurements and time points.
weight_type : (str1, str2)
The rule used for calculating weights
such as ('dissimilar', 'euclidean') and ('similar', 'pearson correlation')
weight_normalization : bool
If it is False, do nothing.
If it is True, normalize weights to [0, 1].
        After doing this, the greater the weight is, the more related the two vertices of the edge are.
Returns
-------
graph : nx.Graph
"""
row_ind, col_ind, edge_data = mesh2edge_list(faces, n, ordinal, mask, vtx_signal,
weight_type, weight_normalization)
graph = Graph()
    # add_weighted_edges_from only adds edges, so a graph built from it alone would
    # be missing any node that has no edge. Because some graphs may contain isolated
    # nodes, we add all nodes explicitly before adding the weighted edges.
if mask is None:
n_vtx = np.max(faces) + 1
graph.add_nodes_from(range(n_vtx))
else:
vertices = np.nonzero(mask)[0]
graph.add_nodes_from(vertices)
# add_weighted_edges_from is faster than from_scipy_sparse_matrix and from_numpy_matrix
# add_weighted_edges_from is also faster than default constructor
# To get more related information, please refer to
# http://stackoverflow.com/questions/24681677/transform-csr-matrix-into-networkx-graph
graph.add_weighted_edges_from(zip(row_ind, col_ind, edge_data))
return graph
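# --- Illustrative sketch (added; hypothetical helper, not original API) --------
# Build a weighted graph for the two-triangle mesh, with one scalar measurement
# per vertex and euclidean dissimilarity rescaled to [0, 1] edge weights.
def _demo_mesh2graph():
    faces = np.array([[0, 1, 2], [1, 2, 3]])
    vtx_signal = np.array([[0.0], [1.0], [2.0], [4.0]])
    graph = mesh2graph(faces, vtx_signal=vtx_signal,
                       weight_type=('dissimilar', 'euclidean'),
                       weight_normalization=True)
    return graph.number_of_nodes(), graph.number_of_edges()  # (4, 5) here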
def binary_shrink(bin_data, faces, n=1, n_ring_neighbors=None):
"""
shrink bin_data
Parameters
----------
bin_data : 1-D numpy array
Each array index is corresponding to vertex id in the faces.
Each element is a bool.
faces : numpy array
the array of shape [n_triangles, 3]
n : integer
specify which ring should be got
n_ring_neighbors : list
If this parameter is not None, two parameters ('faces', 'n') will be ignored.
        It saves time when the function is called repeatedly with
        the same n_ring_neighbors, which can be obtained from get_n_ring_neighbor.
The indices are vertices' id of a mesh.
One index's corresponding element is a collection of vertices which connect with the index.
Return
------
new_data : 1-D numpy array with bool elements
The output of the bin_data after binary shrink
"""
if bin_data.dtype != np.bool:
raise TypeError("The input dtype must be bool")
vertices = np.where(bin_data)[0]
new_data = np.zeros_like(bin_data)
if n_ring_neighbors is None:
n_ring_neighbors = get_n_ring_neighbor(faces, n)
for v_id in vertices:
neighbors_values = [bin_data[_] for _ in n_ring_neighbors[v_id]]
if np.all(neighbors_values):
new_data[v_id] = True
return new_data
def binary_expand(bin_data, faces, n=1, n_ring_neighbors=None):
"""
expand bin_data
Parameters
----------
bin_data : 1-D numpy array
Each array index is corresponding to vertex id in the faces.
Each element is a bool.
faces : numpy array
the array of shape [n_triangles, 3]
n : integer
        specify which ring neighborhood to use
    n_ring_neighbors : list
        If this parameter is not None, the parameters 'faces' and 'n' are ignored.
        It saves time when the function is called repeatedly with the same
        n_ring_neighbors, which can be obtained by get_n_ring_neighbor.
        Each index is a vertex id of the mesh; the element at that index is the
        collection of vertices connected to it.
Return
------
new_data : 1-D numpy array with bool elements
The output of the bin_data after binary expand
"""
if bin_data.dtype != np.bool:
raise TypeError("The input dtype must be bool")
vertices = np.where(bin_data)[0]
new_data = bin_data.copy()
if n_ring_neighbors is None:
n_ring_neighbors = get_n_ring_neighbor(faces, n)
for v_id in vertices:
neighbors_values = [bin_data[_] for _ in n_ring_neighbors[v_id]]
if not np.all(neighbors_values):
new_data[list(n_ring_neighbors[v_id])] = True
return new_data
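# Hedged usage sketch for binary_shrink / binary_expand (the toy mesh is an
# illustrative assumption): shrink erodes a True region by one ring of
# neighbors, expand dilates it by one ring.
def _demo_binary_morphology():
    toy_faces = np.array([[0, 1, 2], [1, 2, 3]])
    roi = np.array([True, True, True, False])
    shrunk = binary_shrink(roi, toy_faces)    # only vertices whose neighbors are all True stay True
    expanded = binary_expand(roi, toy_faces)  # unlabelled neighbors of boundary vertices become True
    return shrunk, expanded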
def label_edge_detection(data, faces, edge_type="inner", neighbors=None):
"""
edge detection for labels
Parameters
----------
data : 1-D numpy array
Each array index is corresponding to vertex id in the faces.
Each element is a label id.
faces : numpy array
the array of shape [n_triangles, 3]
edge_type : str
"inner" means inner edges of labels.
"outer" means outer edges of labels.
"both" means both of them in one array
"split" means returning inner and outer edges in two arrays respectively
neighbors : list
        If this parameter is not None, the parameter 'faces' is ignored.
        It saves time when the function is called repeatedly with the same
        neighbors, which can be obtained by get_n_ring_neighbor.
        Each index is a vertex id of the mesh; the element at that index is the
        collection of vertices connected to it.
Return
------
inner_data : 1-D numpy array
the inner edges of the labels
outer_data : 1-D numpy array
the outer edges of the labels
        Note that outer_data's element values may not correspond strictly to the
        labels' ids when some labels are too close to each other.
"""
# data preparation
vertices = np.nonzero(data)[0]
inner_data = np.zeros_like(data)
outer_data = np.zeros_like(data)
if neighbors is None:
neighbors = get_n_ring_neighbor(faces)
# look for edges
for v_id in vertices:
neighbors_values = [data[_] for _ in neighbors[v_id]]
if min(neighbors_values) != max(neighbors_values):
if edge_type in ("inner", "both", "split"):
inner_data[v_id] = data[v_id]
if edge_type in ("outer", "both", "split"):
outer_vtx = [vtx for vtx in neighbors[v_id] if data[v_id] != data[vtx]]
outer_data[outer_vtx] = data[v_id]
# return results
if edge_type == "inner":
return inner_data
elif edge_type == "outer":
return outer_data
elif edge_type == "both":
return inner_data + outer_data
elif edge_type == "split":
return inner_data, outer_data
else:
raise ValueError("The argument 'edge_type' must be one of the (inner, outer, both, split)")
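# Hedged usage sketch (the toy labels below are an illustrative assumption):
# split the boundary of label 1 into its inner edge (labelled vertices touching
# the outside) and outer edge (their unlabelled neighbors).
def _demo_label_edge_detection():
    toy_faces = np.array([[0, 1, 2], [1, 2, 3]])
    labels = np.array([1, 1, 0, 0])
    inner, outer = label_edge_detection(labels, toy_faces, edge_type="split")
    return inner, outer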
def get_patch_by_crg(vertices, neighbors_list):
"""
    Find patches within 'vertices': a vertex can reach other vertices in the
    same patch, but cannot reach vertices in other patches.
    The function behaves like connected-component detection in graph theory.
:param vertices: set
:param neighbors_list: list
The indices are vertices' id of a mesh.
One index's corresponding element is a collection of vertices which connect with the index.
    :return: patches
        Each element is a collection of vertices, i.e. one patch.
"""
from froi.algorithm.regiongrow import RegionGrow
patches = []
while vertices:
seed = vertices.pop()
patch = RegionGrow().connectivity_grow([[seed]], neighbors_list)[0]
patches.append(list(patch))
vertices.difference_update(patch)
return patches
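# Hedged usage sketch (assumes froi's RegionGrow is importable; the toy
# neighbor list is illustrative): vertices {0, 1} and {2, 3} form two
# disconnected patches.
def _demo_get_patch_by_crg():
    vertices = {0, 1, 2, 3}
    neighbors_list = [{1}, {0}, {3}, {2}]
    return get_patch_by_crg(vertices, neighbors_list)  # expected: two patches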
class LabelAssessment(object):
@staticmethod
def transition_level(label, data, faces, neighbors=None, relative=False):
"""
Calculate the transition level on the region's boundary.
The result is regarded as the region's assessed value.
Adapted from (Chantal et al. 2002).
Parameters
----------
label : list
a collection of vertices with the label
data : numpy array
scalar data with the shape (#vertices, #features)
faces : numpy array
the array of shape [n_triangles, 3]
neighbors : list
            If this parameter is not None, the parameter 'faces' is ignored.
            It saves time when the function is called repeatedly with the same
            neighbors, which can be obtained by get_n_ring_neighbor.
            Each index is a vertex id of the mesh; the element at that index is
            the collection of vertices connected to it.
        relative : bool
            If True, divide the transition level by the sum of the couple's
            absolute values.
Return
------
assessed_value : float
Larger is often better.
"""
label_data = np.zeros_like(data, dtype=np.int8)
label_data[label] = 1
        # the neighbors are needed below as well as inside label_edge_detection,
        # so compute them here when they were not supplied
        if neighbors is None:
            neighbors = get_n_ring_neighbor(faces)
        inner_data = label_edge_detection(label_data, faces, "inner", neighbors)
        inner_edge = np.nonzero(inner_data)[0]
count = 0
sum_tmp = 0
for vtx_i in inner_edge:
for vtx_o in neighbors[vtx_i]:
if label_data[vtx_o] == 0:
couple_signal = data[[vtx_i, vtx_o]]
euclidean = float(pdist(couple_signal)[0])
if relative:
denominator = np.sum(abs(couple_signal))
euclidean = euclidean / denominator if denominator else 0
sum_tmp += euclidean
count += 1
return sum_tmp / float(count) if count else 0
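# Hedged usage sketch for LabelAssessment.transition_level (toy inputs are
# illustrative assumptions, not project data): a sharp signal step between the
# labelled vertices {0, 1} and their neighbors should give a relatively large
# transition level.
def _demo_transition_level():
    toy_faces = np.array([[0, 1, 2], [1, 2, 3]])
    data = np.array([[1.0], [1.1], [5.0], [5.2]])
    neighbors = get_n_ring_neighbor(toy_faces)
    return LabelAssessment.transition_level([0, 1], data, toy_faces,
                                            neighbors=neighbors)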
if __name__ == '__main__':
from nibabel.freesurfer import read_geometry
from froi.io.surf_io import read_scalar_data
from networkx import Graph
from graph_tool import graph2parcel, node_attr2array
import nibabel as nib
coords, faces = read_geometry('/nfs/t1/nsppara/corticalsurface/fsaverage5/surf/rh.inflated')
scalar = read_scalar_data('/nfs/t3/workingshop/chenxiayu/data/region-growing-froi/S1/surf/'
'rh_zstat1_1w_fracavg.mgz')
# faces = np.array([[1, 2, 3], [0, 1, 3]])
# scalar = np.array([[1], [2], [3], [4]])
graph = mesh2graph(faces, vtx_signal=scalar, weight_normalization=True)
graph, parcel_neighbors = graph2parcel(graph, n=5000)
labels = [attrs['label'] for attrs in graph.node.values()]
    print('finish ncut!')
labels = np.unique(labels)
    print(len(labels))
    print(np.max(labels))
arr = node_attr2array(graph, ('label',))
# zero_idx = np.where(map(lambda x: x not in parcel_neighbors[800], arr))
# arr[zero_idx[0]] = 0
nib.save(nib.Nifti1Image(arr, np.eye(4)), '/nfs/t3/workingshop/chenxiayu/test/cxy/ncut_label_1w_5000.nii')
| bsd-3-clause |
h2educ/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
stonebig/bokeh | examples/plotting/file/elements.py | 5 | 1855 | import pandas as pd
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata.periodic_table import elements
elements = elements.copy()
elements = elements[elements["atomic number"] <= 82]
elements = elements[~pd.isnull(elements["melting point"])]
mass = [float(x.strip("[]")) for x in elements["atomic mass"]]
elements["atomic mass"] = mass
palette = ["#053061", "#2166ac", "#4393c3", "#92c5de", "#d1e5f0",
"#f7f7f7", "#fddbc7", "#f4a582", "#d6604d", "#b2182b", "#67001f"]
melting_points = elements["melting point"]
low = min(melting_points)
high = max(melting_points)
melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points] #gives items in colors a value from 0-10
elements['melting_colors'] = [palette[i] for i in melting_point_inds]
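# Quick sanity check of the melting-point binning above (illustrative numbers,
# not taken from the dataset): with low = 300 and high = 3800, an element
# melting at 2050 maps to int(10 * (2050 - 300) / (3800 - 300)) = int(5.0) = 5,
# i.e. the middle entry of the 11-colour palette.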
TITLE = "Density vs Atomic Weight of Elements (colored by melting point)"
TOOLS = "hover,pan,wheel_zoom,box_zoom,reset,save"
p = figure(tools=TOOLS, toolbar_location="above", plot_width=1200, title=TITLE)
p.toolbar.logo = "grey"
p.background_fill_color = "#dddddd"
p.xaxis.axis_label = "atomic weight (amu)"
p.yaxis.axis_label = "density (g/cm^3)"
p.grid.grid_line_color = "white"
p.hover.tooltips = [
("name", "@name"),
("symbol:", "@symbol"),
("density", "@density"),
("atomic weight", "@{atomic mass}"),
("melting point", "@{melting point}")
]
source = ColumnDataSource(elements)
p.circle("atomic mass", "density", size=12, source=source,
color='melting_colors', line_color="black", fill_alpha=0.8)
labels = LabelSet(x="atomic mass", y="density", text="symbol", y_offset=8,
text_font_size="8pt", text_color="#555555",
source=source, text_align='center')
p.add_layout(labels)
output_file("elements.html", title="elements.py example")
show(p)
| bsd-3-clause |
bajibabu/merlin | src/work_in_progress/oliver/run_tpdnn.py | 2 | 101142 |
import pickle
import gzip
import os, sys, errno
import time
import math
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProviderWithProjectionIndex, expand_projection_inputs, get_unexpanded_projection_inputs # ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
#from frontend.acoustic_normalisation import CMPNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
#from frontend.feature_normalisation_base import FeatureNormBase
from frontend.mean_variance_norm import MeanVarianceNorm
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
import configuration
from models.dnn import DNN
from models.tpdnn import TokenProjectionDNN
from models.ms_dnn import MultiStreamDNN
from models.ms_dnn_gv import MultiStreamDNNGv
from models.sdae import StackedDenoiseAutoEncoder
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import io
## This should always be True -- tidy up later
expand_by_minibatch = True
if expand_by_minibatch:
proj_type = 'int32'
else:
proj_type = theano.config.floatX
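# Illustrative note (an assumption based on the imports above and on the
# `indexes_only=expand_by_minibatch` flag passed to the data providers below,
# not a statement of the provider's API): with expand_by_minibatch=True the
# projection input is carried around as int32 token indices, e.g.
#     x_proj = numpy.array([3, 3, 7], dtype='int32')   # one token index per frame
# and expanded to one-hot form minibatch by minibatch, whereas with False it
# would be pre-expanded to a float matrix of width projection_insize.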
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
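# Hedged usage sketch for the file-list helpers above (the list file, the
# directories and the extensions are illustrative assumptions, not project
# defaults):
def _demo_file_lists():
    file_id_list = read_file_list('file_id_list.scp')        # one utterance id per line
    lab_file_list = prepare_file_path_list(file_id_list, '/tmp/nn_lab', '.lab')
    cmp_file_list = prepare_file_path_list(file_id_list, '/tmp/nn_cmp', '.cmp')
    return lab_file_list, cmp_file_list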
def visualize_dnn(dnn):
layer_num = len(dnn.params) / 2 ## including input and output
for i in range(layer_num):
fig_name = 'Activation weights W' + str(i)
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i+1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num-1:
ylabel = 'Output feature index'
logger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i*2].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
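# Hedged sketch of the hyper-parameter dictionary consumed by the training
# functions below. The keys are the ones read out of hyper_params in this file;
# the values are illustrative assumptions, not recommended settings.
_EXAMPLE_HYPER_PARAMS = {
    'learning_rate': 0.002, 'training_epochs': 25, 'batch_size': 256,
    'l1_reg': 0.0, 'l2_reg': 1e-5, 'private_l2_reg': 1e-5,
    'warmup_epoch': 10, 'momentum': 0.9, 'warmup_momentum': 0.3,
    'hidden_layers_sizes': [1024, 1024, 1024, 1024],
    'stream_weights': None, 'private_hidden_sizes': None,
    'early_stop_epochs': 5, 'hidden_activation': 'tanh',
    'output_activation': 'linear', 'stream_lr_weights': None,
    'use_private_hidden': False, 'model_type': 'TPDNN',
    'index_to_project': 0, 'projection_insize': 10000, 'projection_outsize': 10,
    'do_pretraining': False, 'pretraining_epochs': 10, 'pretraining_lr': 0.0001,
    'initial_projection_distrib': 'gaussian',
}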
## Function for training projection and non-projection parts at same time
def train_DNN(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
## use a switch to turn on pretraining
    ## pretraining may not help much; if that is the case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
initial_projection_distrib = hyper_params['initial_projection_distrib']
buffer_size = int(buffer_size / batch_size) * batch_size
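    ## e.g. buffer_size=200000 with batch_size=256 gives int(200000/256)*256 = 781*256 = 199936,
    ## i.e. the buffer is rounded down to a whole number of minibatches (illustrative figures)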
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
    ## temporarily we use the training set as pretrain_set_x.
    ## we need to support arbitrary data for pretraining
pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all the model support pretraining right now
train_fn = None
valid_fn = None
    valid_model = None ## valid_fn and valid_model are the same; reserved to compute multi-stream distortion
if model_type == 'DNN':
dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'TPDNN':
dnn_model = TokenProjectionDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation,
projection_insize=projection_insize, projection_outsize=projection_outsize,
expand_by_minibatch=expand_by_minibatch, initial_projection_distrib=initial_projection_distrib)
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
elif model_type == 'SDAE':
        ## basic model is ready.
        ## if the corruption levels are set to zero, it becomes a normal autoencoder
dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes)
if do_pretraining:
pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'MSDNN': ##model is ready, but the hyper-parameters are not optimised.
dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
elif model_type == 'MSDNN_GV': ## not fully ready
dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise
## if pretraining is supported in one model, add the switch here
## be careful to use autoencoder for pretraining here:
## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1]
    ## however, tanh works better and converges faster in finetuning
##
## Will extend this soon...
if do_pretraining and model_type == 'SDAE':
logger.info('pretraining the %s model' %(model_type))
corruption_level = 0.0
## in SDAE we do layer-wise pretraining using autoencoders
for i in range(dnn_model.n_layers):
for epoch in range(pretraining_epochs):
sub_start_time = time.clock()
pretrain_loss = []
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
n_train_batches = pretrain_set_x.get_value().shape[0] / batch_size
for batch_index in range(n_train_batches):
pretrain_loss.append(pretraining_fn[i](index=batch_index,
corruption=corruption_level,
learning_rate=pretraining_lr))
sub_end_time = time.clock()
logger.info('Pre-training layer %i, epoch %d, cost %s, time spent%.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time)))
train_data_reader.reset()
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
train_error = []
sub_start_time = time.clock()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value().shape[0] / batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_all_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
## osw -- getting validation error from a forward pass in a single batch
        ## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('calculating validation loss')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] / batch_size
for minibatch_index in range(n_valid_batches):
v_loss = valid_score_i(minibatch_index)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
# this has a possible bias if the minibatches were not all of identical size
        # but it should not be significant if minibatches are small
this_train_valid_loss = numpy.mean(train_error)
sub_end_time = time.clock()
loss_difference = this_validation_loss - previous_loss
logger.info('BASIC epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
logger.debug('validation loss decreased, so saving model')
early_stop = 0
else:
logger.debug('validation loss did not improve')
dbn = best_dnn_model
early_stop += 1
if early_stop > early_stop_epoch:
# too many consecutive epochs without surpassing the best model
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
### Save projection values:
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_BASIC_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
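## Hedged invocation sketch for train_DNN (every name and number below is an
## illustrative assumption; in merlin these values come from the configuration
## object, not from this file):
##   train_DNN((train_x_list, train_y_list), (valid_x_list, valid_y_list),
##             nnets_file_name='dnn_model.pkl', n_ins=601, n_outs=259,
##             ms_outs=[259], hyper_params=_EXAMPLE_HYPER_PARAMS,
##             buffer_size=200000, plot=False)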
## Function for training the whole model on the training data while simultaneously
## inferring projection weights on the dev data.
# in each epoch do:
# train_all_fn()
# infer_projections_fn() ## <-- updates proj for devset and gives validation loss
def train_DNN_and_traindev_projections(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
## use a switch to turn on pretraining
    ## pretraining may not help much; if that is the case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
initial_projection_distrib = hyper_params['initial_projection_distrib']
buffer_size = int(buffer_size / batch_size) * batch_size
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
    ## temporarily we use the training set as pretrain_set_x.
    ## we need to support arbitrary data for pretraining
pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all the model support pretraining right now
train_fn = None
valid_fn = None
    valid_model = None ## valid_fn and valid_model are the same; reserved to compute multi-stream distortion
if model_type == 'DNN':
dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'TPDNN':
dnn_model = TokenProjectionDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation,
projection_insize=projection_insize, projection_outsize=projection_outsize,
expand_by_minibatch=expand_by_minibatch, initial_projection_distrib=initial_projection_distrib)
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
elif model_type == 'SDAE':
        ## basic model is ready.
        ## if the corruption levels are set to zero, it becomes a normal autoencoder
dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes)
if do_pretraining:
pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'MSDNN': ##model is ready, but the hyper-parameters are not optimised.
dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
elif model_type == 'MSDNN_GV': ## not fully ready
dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise
## if pretraining is supported in one model, add the switch here
## be careful to use autoencoder for pretraining here:
## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1]
    ## however, tanh works better and converges faster in finetuning
##
## Will extend this soon...
if do_pretraining and model_type == 'SDAE':
logger.info('pretraining the %s model' %(model_type))
corruption_level = 0.0
## in SDAE we do layer-wise pretraining using autoencoders
for i in range(dnn_model.n_layers):
for epoch in range(pretraining_epochs):
sub_start_time = time.clock()
pretrain_loss = []
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
n_train_batches = pretrain_set_x.get_value().shape[0] / batch_size
for batch_index in range(n_train_batches):
pretrain_loss.append(pretraining_fn[i](index=batch_index,
corruption=corruption_level,
learning_rate=pretraining_lr))
sub_end_time = time.clock()
logger.info('Pre-training layer %i, epoch %d, cost %s, time spent%.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time)))
train_data_reader.reset()
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
##dnn_model.zero_projection_weights()
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
train_error = []
sub_start_time = time.clock()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value().shape[0] / batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_all_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
## infer validation weights before getting validation error:
## osw -- inferring word reps on validation set in a forward pass in a single batch
        ## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('infer word representations for validation set')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] / batch_size
for minibatch_index in range(n_valid_batches):
v_loss = infer_projections_fn(minibatch_index, current_finetune_lr, current_momentum)
valid_error.append(v_loss)
        ## this function also gives us validation loss:
this_validation_loss = numpy.mean(valid_error)
'''
## osw -- getting validation error from a forward pass in a single batch
## exausts memory when using 20k projected vocab -- also use minibatches
logger.debug('calculating validation loss')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] / batch_size
for minibatch_index in xrange(n_valid_batches):
v_loss = valid_score_i(minibatch_index)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
'''
# this has a possible bias if the minibatches were not all of identical size
        # but it should not be significant if minibatches are small
this_train_valid_loss = numpy.mean(train_error)
sub_end_time = time.clock()
loss_difference = this_validation_loss - previous_loss
logger.info('BASIC epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
logger.debug('validation loss decreased, so saving model')
early_stop = 0
else:
logger.debug('validation loss did not improve')
dbn = best_dnn_model
early_stop += 1
if early_stop > early_stop_epoch:
# too many consecutive epochs without surpassing the best model
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
### Save projection values:
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_BASIC_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
## Function for training the non-projection part only
def train_basic_DNN(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
## use a switch to turn on pretraining
    ## pretraining may not help much; if that is the case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
initial_projection_distrib = hyper_params['initial_projection_distrib']
buffer_size = int(buffer_size / batch_size) * batch_size
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
    ## temporarily we use the training set as pretrain_set_x.
    ## we need to support arbitrary data for pretraining
pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all the model support pretraining right now
train_fn = None
valid_fn = None
    valid_model = None ## valid_fn and valid_model are the same; reserved to compute multi-stream distortion
if model_type == 'DNN':
dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'TPDNN':
dnn_model = TokenProjectionDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation,
projection_insize=projection_insize, projection_outsize=projection_outsize,
expand_by_minibatch=expand_by_minibatch, initial_projection_distrib=initial_projection_distrib)
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
elif model_type == 'SDAE':
        ## basic model is ready.
        ## if the corruption levels are set to zero, it becomes a normal autoencoder
dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes)
if do_pretraining:
pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'MSDNN': ##model is ready, but the hyper-parameters are not optimised.
dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
elif model_type == 'MSDNN_GV': ## not fully ready
dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise
## if pretraining is supported in one model, add the switch here
## be careful to use autoencoder for pretraining here:
## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1]
    ## however, tanh works better and converges faster in finetuning
##
## Will extend this soon...
if do_pretraining and model_type == 'SDAE':
logger.info('pretraining the %s model' %(model_type))
corruption_level = 0.0
## in SDAE we do layer-wise pretraining using autoencoders
for i in range(dnn_model.n_layers):
for epoch in range(pretraining_epochs):
sub_start_time = time.clock()
pretrain_loss = []
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
n_train_batches = pretrain_set_x.get_value().shape[0] / batch_size
for batch_index in range(n_train_batches):
pretrain_loss.append(pretraining_fn[i](index=batch_index,
corruption=corruption_level,
learning_rate=pretraining_lr))
sub_end_time = time.clock()
logger.info('Pre-training layer %i, epoch %d, cost %s, time spent%.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time)))
train_data_reader.reset()
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
dnn_model.zero_projection_weights()
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
train_error = []
sub_start_time = time.clock()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value().shape[0] / batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_subword_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
## osw -- getting validation error from a forward pass in a single batch
        ## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('calculating validation loss')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] / batch_size
for minibatch_index in range(n_valid_batches):
v_loss = valid_score_i(minibatch_index)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
# this has a possible bias if the minibatches were not all of identical size
        # but it should not be significant if minibatches are small
this_train_valid_loss = numpy.mean(train_error)
sub_end_time = time.clock()
loss_difference = this_validation_loss - previous_loss
logger.info('BASIC epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
logger.debug('validation loss decreased, so saving model')
early_stop = 0
else:
logger.debug('validation loss did not improve')
dbn = best_dnn_model
early_stop += 1
if early_stop > early_stop_epoch:
# too many consecutive epochs without surpassing the best model
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
### Save projection values:
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_BASIC_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
### ========== now train the word residual ============
def train_DNN_with_projections(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
######### data providers ##########
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
####################################
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
############## load existing dnn #####
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
####################################
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
dnn_model.initialise_projection_weights()
all_epochs = 20 ## 100 ## <-------- hard coded !!!!!!!!!!
current_finetune_lr = previous_finetune_lr = finetune_lr
warmup_epoch_2 = 10 # 10 ## <-------- hard coded !!!!!!!!!!
while (epoch < all_epochs):
epoch = epoch + 1
current_momentum = momentum
if epoch > warmup_epoch_2:
previous_finetune_lr = current_finetune_lr
current_finetune_lr = previous_finetune_lr * 0.5
train_error = []
sub_start_time = time.clock()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value().shape[0] / batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_word_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
### COULD REMOVE THIS LATER
## osw -- getting validation error from a forward pass in a single batch
## exhausts memory when using 20k projected vocab -- so minibatches are used here too
logger.debug('calculating validation loss')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] // batch_size  # integer division for range()
for minibatch_index in range(n_valid_batches):
v_loss = valid_score_i(minibatch_index)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
# this has a possible bias if the minibatches were not all of identical size
# but it should not be significant if minibatches are small
this_train_valid_loss = numpy.mean(train_error)
# if plot:
# ## add dummy validation loss so that plot works:
# plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
# plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
#
sub_end_time = time.clock()
logger.info('TOKEN epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_TOKEN_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
best_dnn_model = dnn_model ## always update
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
# if plot:
# plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
#
### ========================================================
### ========== now infer word representations for out-of-training (dev) data ============
#
# ### TEMP-- restarted!!! ### ~~~~~~~
# epoch = 50
# dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
# train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
# dnn_model.build_finetune_functions(
# (train_set_x, train_set_x_proj, train_set_y),
# (valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
# this_train_valid_loss = 198.0 ## approx value
# ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
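# infer_projections() below reloads the trained model and, for a fixed number of
# epochs, runs infer_projections_fn over the validation set only, dumping the
# projection (token-embedding) weights after each epoch. Note the projection
# weights are *not* re-initialised here, so the learned representations are
# adapted (presumably with the rest of the network held fixed) for
# out-of-training tokens.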
def infer_projections(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
######### data providers ##########
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
####################################
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
############## load existing dnn #####
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
####################################
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
logger.info('fine-tuning the %s model' %(model_type))
#dnn_model.initialise_projection_weights()
inference_epochs = 20 ## <-------- hard coded !!!!!!!!!!
current_finetune_lr = previous_finetune_lr = finetune_lr
warmup_epoch_3 = 10 # 10 ## <-------- hard coded !!!!!!!!!!
#warmup_epoch_3 = epoch + warmup_epoch_3
#inference_epochs += epoch
while (epoch < inference_epochs):
epoch = epoch + 1
current_momentum = momentum
if epoch > warmup_epoch_3:
previous_finetune_lr = current_finetune_lr
current_finetune_lr = previous_finetune_lr * 0.5
dev_error = []
sub_start_time = time.clock()
## osw -- inferring word reps on validation set in a forward pass in a single batch
## exhausts memory when using 20k projected vocab -- so minibatches are used here too
logger.debug('infer word representations for validation set')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] // batch_size  # integer division for range()
for minibatch_index in range(n_valid_batches):
v_loss = infer_projections_fn(minibatch_index, current_finetune_lr, current_momentum)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
#valid_error = infer_projections_fn(current_finetune_lr, current_momentum)
#this_validation_loss = numpy.mean(valid_error)
# if plot:
# ## add dummy validation loss so that plot works:
# plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
# plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
#
sub_end_time = time.clock()
logger.info('INFERENCE epoch %i, validation error %f, time spent %.2f' %(epoch, this_validation_loss, (sub_end_time - sub_start_time)))
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_INFERENCE_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
best_dnn_model = dnn_model ## always update
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
# if plot:
# plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
#
### ========================================================
if cfg.hyper_params['model_type'] == 'TPDNN':
os.system('python %s %s'%('/afs/inf.ed.ac.uk/user/o/owatts/scripts_NEW/plot_weights_multiple_phases.py', cfg.projection_weights_output_dir))
return best_validation_loss
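# dnn_generation: forward-pass synthesis. Loads the pickled model and predicts
# acoustic parameters for each input label file. When use_word_projections is
# False, the projection weights are randomised first (see comment below), which
# gives a baseline without the learned token representations.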
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, cfg=None, use_word_projections=True):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
## 'remove' word representations by randomising them. As the model is unpickled and
## not re-saved, this does not throw trained parameters away.
if not use_word_projections:
dnn_model.initialise_projection_weights()
# visualize_dnn(dbn)
file_number = len(valid_file_list)
for i in range(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size // n_ins))]  # integer division: slice indices must be ints
features = features.reshape((-1, n_ins))
#features, features_proj = expand_projection_inputs(features, cfg.index_to_project, \
# cfg.projection_insize)
features, features_proj = get_unexpanded_projection_inputs(features, cfg.index_to_project, \
cfg.projection_insize)
#temp_set_x = features.tolist() ## osw - why list conversion necessary?
#print temp_set_x
test_set_x = theano.shared(numpy.asarray(features, dtype=theano.config.floatX))
test_set_x_proj = theano.shared(numpy.asarray(features_proj, dtype='int32'))
predicted_parameter = dnn_model.parameter_prediction(test_set_x=test_set_x, test_set_x_proj=test_set_x_proj)
# predicted_parameter = test_out()
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
## generate bottleneck-layer activations as features
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
for i in range(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size // n_ins))]  # integer division: slice indices must be ints
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
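# main_function: top-level pipeline controlled by the cfg switches --
# NORMLAB (label preparation and normalisation), MAKECMP (acoustic feature
# composition and silence trimming), NORMCMP (output-feature normalisation),
# TRAINDNN (model training / projection inference), DNNGEN (parameter
# generation with and without token projections), GENWAV (waveform
# reconstruction) and CALMCD (objective distortion measures).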
def main_function(cfg):
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
# later, we might do this via a handler that is created, attached and configured
# using the standard config mechanism of the logging module
# but for now we need to do it manually
plotlogger.set_plot_path(cfg.plot_dir)
#### parameter setting########
hidden_layers_sizes = cfg.hyper_params['hidden_layers_sizes']
####prepare environment
try:
file_id_list = read_file_list(cfg.file_id_scp)
logger.debug('Loaded file id list from %s' % cfg.file_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.file_id_scp)
raise
###total file number including training, development, and testing
total_file_number = len(file_id_list)
data_dir = cfg.data_dir
nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
model_dir = os.path.join(cfg.work_dir, 'nnets_model')
gen_dir = os.path.join(cfg.work_dir, 'gen')
in_file_list_dict = {}
for feature_name in list(cfg.in_dir_dict.keys()):
in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
###normalisation information
norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')
### normalise input full context label
# currently supporting two different forms of linguistic features
# later, we should generalise this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
# no longer supported - use new "composed" style labels instead
elif cfg.label_style == 'composed':
# label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
suffix='composed'
if cfg.process_labels_in_work_dir:
label_data_dir = cfg.work_dir
else:
label_data_dir = data_dir
# the number can be removed
binary_label_dir = os.path.join(label_data_dir, 'binary_label_'+suffix)
nn_label_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_'+suffix)
nn_label_norm_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_norm_'+suffix)
# nn_label_norm_mvn_dir = os.path.join(data_dir, 'nn_no_silence_lab_norm_'+suffix)
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
nn_label_file_list = prepare_file_path_list(file_id_list, nn_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
# to do - sanity check the label dimension here?
min_max_normaliser = None
label_norm_file = 'label_norm_%s.dat' %(cfg.label_style)
label_norm_file = os.path.join(label_data_dir, label_norm_file)
if cfg.NORMLAB and (cfg.label_style == 'HTS'):
# simple HTS labels
logger.info('preparing label data (input) using standard HTS style labels')
label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list)
remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if cfg.NORMLAB and (cfg.label_style == 'composed'):
# new flexible label preprocessor
logger.info('preparing label data (input) using "composed" style labels')
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
logger.info('Loaded label configuration')
# logger.info('%s' % label_composer.configuration.labels )
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension will be %d' % lab_dim)
if cfg.precompile_xpaths:
label_composer.precompile_xpaths()
# there are now a set of parallel input label files (e.g, one set of HTS and another set of Ossian trees)
# create all the lists of these, ready to pass to the label composer
in_label_align_file_list = {}
for label_style, label_style_required in label_composer.label_styles.items():
if label_style_required:
logger.info('labels of style %s are required - constructing file paths for them' % label_style)
if label_style == 'xpath':
in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, cfg.xpath_label_align_dir, cfg.utt_ext, False)
elif label_style == 'hts':
in_label_align_file_list['hts'] = prepare_file_path_list(file_id_list, cfg.hts_label_align_dir, cfg.lab_ext, False)
else:
logger.critical('unsupported label style %s specified in label configuration' % label_style)
raise Exception
# now iterate through the files, one at a time, constructing the labels for them
num_files=len(file_id_list)
logger.info('the label styles required are %s' % label_composer.label_styles)
for i in range(num_files):
logger.info('making input label features for %4d of %4d' % (i+1,num_files))
# iterate through the required label styles and open each corresponding label file
# a dictionary of file descriptors, pointing at the required files
required_labels={}
for label_style, label_style_required in label_composer.label_styles.items():
# the files will be a parallel set of files for a single utterance
# e.g., the XML tree and an HTS label file
if label_style_required:
required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])
logger.debug('label styles with open files: %s' % required_labels)
label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)
# now close all opened files
for fd in required_labels.values():
fd.close()
# silence removal
if cfg.remove_silence_using_binary_labels:
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from label using silence feature: %s'%(label_composer.configuration.labels[silence_feature]))
logger.info('Silence will be removed from CMP files in same way')
## Binary labels have 2 roles: both the thing trimmed and the instructions for trimming:
trim_silence(binary_label_file_list, nn_label_file_list, lab_dim, \
binary_label_file_list, lab_dim, silence_feature, percent_to_keep=5)
else:
logger.info('No silence removal done')
# start from the labels we have just produced, not trimmed versions
nn_label_file_list = binary_label_file_list
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99, exclude_columns=[cfg.index_to_project])
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if min_max_normaliser != None:
### save label normalisation information for unseen testing labels
label_min_vector = min_max_normaliser.min_vector
label_max_vector = min_max_normaliser.max_vector
label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0)
label_norm_info = numpy.array(label_norm_info, 'float32')
fid = open(label_norm_file, 'wb')
label_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file))
### make output acoustic data
if cfg.MAKECMP:
logger.info('creating acoustic (output) features')
delta_win = [-0.5, 0.0, 0.5]
acc_win = [1.0, -2.0, 1.0]
acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)
if cfg.remove_silence_using_binary_labels:
## do this to get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from CMP using binary label file')
## overwrite the untrimmed audio with the trimmed version:
trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim, \
binary_label_file_list, lab_dim, silence_feature, percent_to_keep=5)
else: ## back off to previous method using HTS labels:
remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(nn_cmp_file_list, in_label_align_file_list, nn_cmp_file_list) # save to itself
### save acoustic normalisation information for normalising the features back
var_dir = os.path.join(data_dir, 'var')
if not os.path.exists(var_dir):
os.makedirs(var_dir)
var_file_dict = {}
for feature_name in list(cfg.out_dimension_dict.keys()):
var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))
### normalise output acoustic data
if cfg.NORMCMP:
logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
cmp_norm_info = None
if cfg.output_feature_normalisation == 'MVN':
normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
###calculate mean and std vectors on the training data, and apply on the whole dataset
global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)
normaliser.feature_normalisation(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)
elif cfg.output_feature_normalisation == 'MINMAX':
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim)
global_mean_vector = min_max_normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number])
global_std_vector = min_max_normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector)
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_min_vector = min_max_normaliser.min_vector
cmp_max_vector = min_max_normaliser.max_vector
cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)
else:
logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
fid = open(norm_info_file, 'wb')
cmp_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))
# logger.debug(' value was\n%s' % cmp_norm_info)
feature_index = 0
for feature_name in list(cfg.out_dimension_dict.keys()):
feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')
fid = open(var_file_dict[feature_name], 'w')
feature_std_vector.tofile(fid)
fid.close()
logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))
# logger.debug(' value was\n%s' % feature_std_vector)
feature_index += cfg.out_dimension_dict[feature_name]
train_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number]
train_y_file_list = nn_cmp_norm_file_list[0:cfg.train_file_number]
valid_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
valid_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
# we need to know the label dimension before training the DNN
# computing that requires us to look at the labels
#
# currently, there are two ways to do this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
elif cfg.label_style == 'composed':
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension is %d' % lab_dim)
combined_model_arch = str(len(hidden_layers_sizes))
for hid_size in hidden_layers_sizes:
combined_model_arch += '_' + str(hid_size)
# nnets_file_name = '%s/%s_%s_%d.%d.%d.%d.%d.train.%d.model' \
# %(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
# len(hidden_layers_sizes), hidden_layers_sizes[0],
# lab_dim, cfg.cmp_dim, cfg.train_file_number)
nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.model' \
%(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number)
### DNN model training
if cfg.TRAINDNN:
logger.info('training DNN')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create model directory %s' % model_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
try:
if cfg.scheme == 'stagwise':
train_basic_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
train_DNN_with_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
infer_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
elif cfg.scheme == 'simultaneous':
train_DNN_and_traindev_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
else:
sys.exit('unknown scheme!')
# train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
# valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
# nnets_file_name = nnets_file_name, \
# n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
# hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
# infer_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \
# valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
# nnets_file_name = nnets_file_name, \
# n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
# hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
except KeyboardInterrupt:
logger.critical('train_DNN interrupted via keyboard')
# Could 'raise' the exception further, but that causes a deep traceback to be printed
# which we don't care about for a keyboard interrupt. So, just bail out immediately
sys.exit(1)
except:
logger.critical('train_DNN threw an exception')
raise
### generate parameters from DNN (with random token reps and inferred ones -- NOTOKENS & TOKENS)
temp_dir_name_NOTOKENS = '%s_%s_%d_%d_%d_%d_%d_%d_NOTOKENS' \
%(cfg.model_type, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layers_sizes), hidden_layers_sizes[0])
gen_dir_NOTOKENS = os.path.join(gen_dir, temp_dir_name_NOTOKENS)
temp_dir_name_TOKENS = '%s_%s_%d_%d_%d_%d_%d_%d_TOKENS' \
%(cfg.model_type, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layers_sizes), hidden_layers_sizes[0])
gen_dir_TOKENS = os.path.join(gen_dir, temp_dir_name_TOKENS)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
## Without words embeddings:
gen_file_list_NOTOKENS = prepare_file_path_list(gen_file_id_list, gen_dir_NOTOKENS, cfg.cmp_ext)
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list_NOTOKENS, cfg=cfg, use_word_projections=False)
## With word embeddings:
gen_file_list_TOKENS = prepare_file_path_list(gen_file_id_list, gen_dir_TOKENS, cfg.cmp_ext)
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list_TOKENS, cfg=cfg, use_word_projections=True)
logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)
for gen_file_list in [gen_file_list_NOTOKENS, gen_file_list_TOKENS]:
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_min_vector = cmp_min_max[0, ]
cmp_max_vector = cmp_min_max[1, ]
if cfg.output_feature_normalisation == 'MVN':
denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)
elif cfg.output_feature_normalisation == 'MINMAX':
denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
denormaliser.denormalise_data(gen_file_list, gen_file_list)
else:
logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
## perform MLPG to smooth parameter trajectories
## if lf0 is included, the output features must have vuv.
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict)
## osw: skip MLPG:
# split_cmp(gen_file_list, ['mgc', 'lf0', 'bap'], cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict)
### generate wav
if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
for gen_dir in [gen_dir_NOTOKENS, gen_dir_TOKENS]:
generate_wav(gen_dir, gen_file_id_list, cfg) # generated speech
# generate_wav(nn_cmp_dir, gen_file_id_list) # reference copy synthesis speech
### evaluation: calculate distortion
if cfg.CALMCD:
logger.info('calculating MCD')
ref_data_dir = os.path.join(data_dir, 'ref_data')
ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
spectral_distortion = 0.0
bap_mse = 0.0
f0_mse = 0.0
vuv_error = 0.0
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
## get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
## use first feature in label -- hardcoded for now
silence_feature = 0
## Use these to trim silence:
untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if 'mgc' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list)
valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
if 'bap' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list)
valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
valid_bap_mse = valid_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
test_bap_mse = test_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
if 'lf0' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list)
valid_f0_mse, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
test_f0_mse , test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_vuv_error*100.))
logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(test_spectral_distortion , test_bap_mse , test_f0_mse , test_vuv_error*100.))
# this can be removed
#
if 0: #to calculate distortion of HMM baseline
hmm_gen_no_silence_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400_no_silence'
hmm_gen_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400'
if 1:
hmm_mgc_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.mgc_ext)
hmm_bap_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.bap_ext)
hmm_lf0_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.lf0_ext)
hmm_mgc_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.mgc_ext)
hmm_bap_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.bap_ext)
hmm_lf0_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_mgc_list, in_gen_label_align_file_list, hmm_mgc_no_silence_list)
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_bap_list, in_gen_label_align_file_list, hmm_bap_no_silence_list)
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_lf0_list, in_gen_label_align_file_list, hmm_lf0_no_silence_list)
calculator = IndividualDistortionComp()
spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim)
bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim)
f0_mse, vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim)
spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)
bap_mse = bap_mse / 10.0
logger.info('Develop: HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.))
spectral_distortion = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim)
bap_mse = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim)
f0_mse, vuv_error = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim)
spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)
bap_mse = bap_mse / 10.0
logger.info('Test : HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.))
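# Entry point: the script takes a single command-line argument, the path to a
# configuration file (see the usage message below). Profiling of
# main_function() is enabled when cfg.profile is set.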
if __name__ == '__main__':
# these things should be done even before trying to parse the command line
# create a configuration instance
# and get a short name for this instance
cfg=configuration.cfg
# set up logging to use our custom class
logging.setLoggerClass(LoggerPlotter)
# get a logger for this main function
logger = logging.getLogger("main")
if len(sys.argv) != 2:
logger.critical('usage: run_dnn.sh [config file name]')
sys.exit(1)
config_file = sys.argv[1]
config_file = os.path.abspath(config_file)
cfg.configure(config_file)
if cfg.profile:
logger.info('profiling is activated')
import cProfile, pstats
cProfile.run('main_function(cfg)', 'mainstats')
# create a stream for the profiler to write to
profiling_output = io.StringIO()
p = pstats.Stats('mainstats', stream=profiling_output)
# print stats to that stream
# here we just report the top 10 functions, sorted by total amount of time spent in each
p.strip_dirs().sort_stats('tottime').print_stats(10)
# print the result to the log
logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() )
profiling_output.close()
logger.info('---End of profiling result---')
else:
main_function(cfg)
sys.exit(0)
| apache-2.0 |
nickdex/cosmos | code/artificial_intelligence/src/artificial_neural_network/ann.py | 3 | 1384 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv("dataset.csv")
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features=[1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
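# Dropping the first dummy column avoids the dummy-variable trap: the remaining
# one-hot columns already encode the dropped category.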
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
import keras
from keras.models import Sequential
from keras.layers import Dense
classifier = Sequential()
classifier.add(
Dense(units=6, kernel_initializer="uniform", activation="relu", input_dim=11)
)
classifier.add(Dense(units=6, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(units=1, kernel_initializer="uniform", activation="sigmoid"))
classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
classifier.fit(X_train, y_train, batch_size=10, epochs=100)
y_pred = classifier.predict(X_test)
y_pred = y_pred > 0.5
| gpl-3.0 |
imk1/IMKTFBindingCode | makeClustergram.py | 1 | 2060 | import sys
import argparse
import pylab
import numpy as np
import matplotlib.pyplot as plt
from heatmapcluster import heatmapcluster
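# Note: heatmapcluster is a third-party helper (separate from matplotlib/scipy)
# that draws the clustered heatmap and exposes the row linkage used below.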
def parseArgument():
# Parse the input
parser=argparse.ArgumentParser(description=\
"Make a clustergram of a matrix")
parser.add_argument("--matFileName", required=True,\
help='File with matrix')
parser.add_argument("--columnNamesFileName", required=True,\
help='File with column names')
parser.add_argument("--clusterFileName", required=True,\
help='File where cluster information will be recorded')
parser.add_argument("--metric", required=False, default='euclidean', \
help='Metric for clustering')
parser.add_argument("--contextIndex", required=False, type=int, \
default=None, \
help='Consider only a specific row')
parser.add_argument("--logSignals", action='store_true', required=False,\
help='log the signals')
options = parser.parse_args()
return options
def getStringList(stringFileName):
# Get a list of strings from a file
stringFile = open(stringFileName)
stringList = [line.strip() for line in stringFile]
stringFile.close()
return stringList
def makeClustergram(options):
# Make a clustergram of a matrix
sys.setrecursionlimit(100000)
mat = np.loadtxt(options.matFileName, dtype=np.float16)
if options.logSignals:
# log2 the signal values
mat = np.log2(mat + 1)
columnNames = getStringList(options.columnNamesFileName)
if options.contextIndex is not None:
# Considering the data in the context of a TF
rowsToConsider = np.nonzero(mat[:,options.contextIndex])
mat = mat[rowsToConsider,:].squeeze()
rowNames = [str(i) for i in range(mat.shape[0])]
pylab.figure()
h = heatmapcluster(mat, rowNames, columnNames,
num_row_clusters=None, num_col_clusters=None,
label_fontsize=8,
xlabel_rotation=-75,
cmap=plt.cm.YlOrRd,
show_colorbar=True,
top_dendrogram=True,
metric=options.metric)
np.savetxt(options.clusterFileName, h.row_linkage, fmt='%.4f')
pylab.show()
if __name__ == "__main__":
options = parseArgument()
makeClustergram(options)
| mit |
owlabs/incubator-airflow | airflow/hooks/dbapi_hook.py | 1 | 11082 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import sys
from typing import Optional
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
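# Illustrative sketch (not part of this module): a concrete hook typically
# subclasses DbApiHook and overrides the class attributes defined below, e.g.
#
#     class MyDbHook(DbApiHook):
#         conn_name_attr = 'my_db_conn_id'
#         default_conn_name = 'my_db_default'
#         supports_autocommit = True
#         connector = my_db_driver  # any module exposing a DB-API connect()
#
# 'MyDbHook' and 'my_db_driver' are hypothetical names used only for this example.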
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None # type: Optional[str]
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
uri = '{conn.conn_type}://{login}{host}/'.format(
conn=conn, login=login, host=host)
if conn.schema:
uri += conn.schema
return uri
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if sys.version_info[0] < 3:
s = s.encode('utf-8')
if parameters is not None:
self.log.info("{} with parameters {}".format(s, parameters))
cur.execute(s, parameters)
else:
self.log.info(s)
cur.execute(s)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
def set_autocommit(self, conn, autocommit):
"""
Sets the autocommit flag on the connection
"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr)
)
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting.
:rtype: bool
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
placeholders = ["%s", ] * len(values)
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded %s into %s rows so far", i, table
)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
| apache-2.0 |
hobson/totalgood | totalgood/pacs/predictor.py | 1 | 4199 | #!python manage.py shell_plus <
import pandas as pd
np = pd.np
np.norm = np.linalg.norm
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from pacs.models import RawCommittees
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
def debug():
import ipdb
ipdb.set_trace()
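# PACClassifier: fits an SGD (linear) classifier on TF-IDF features of
# committee names to predict the stringified (committee_type,
# committee_subtype) label, and reuses the same TF-IDF vectoriser for
# cosine-similarity queries between names.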
class PACClassifier(SGDClassifier):
def __init__(self,
names=np.array(RawCommittees.objects.values_list('committee_name', flat=True)),
labels=RawCommittees.objects.values_list('committee_type', 'committee_subtype'),
alpha=1e-5,
penalty='l1',
verbosity=1,
):
"""Train a classifier that predicts a committee type, subtype (label) from its name
Args:
names (array of str): the committee names (space-delimitted words with a few words)
labels (array of 2-tuple of str): the committee_type and subtype, Nones/NaNs/floats are stringified
alpha (float): learning rate (sklearn TFIDF classifier examples use 1e-5 to 1e-6)
default: 1e-5
penalty: 'none', 'l2', 'l1', or 'elasticnet' # regularization penalty on the feature weights
Returns:
SGDClassifier: Trained SVM classifier instance
"""
super(PACClassifier, self).__init__(alpha=alpha, penalty=penalty)
if verbosity is not None:
self.verbosity = verbosity
# vectorizer = CountVectorizer(min_df=1)
# word_bag = vectorizer.fit_transform(self.names)
# print(word_bag)
self.names = (names if isinstance(names, (list, np.ndarray))
else RawCommittees.objects.values_list('committee_name', flat=True))
self.pac_type_tuples = RawCommittees.objects.values_list('committee_type', 'committee_subtype')
self.labels = np.array(list(labels or self.pac_type_tuples))
# self.labels = [', '.join(str(s) for s in pt) for pt in self.pac_type_tuples]
self.labels = np.array([str(lbl) for lbl in self.labels])
self.label_set = sorted(np.unique(self.labels))
self.label_dict = dict(list(zip(self.label_set, range(len(self.label_set)))))
self.label_ints = np.array([self.label_dict[label] for label in self.labels])
if self.verbosity > 1:
print(pd.Series(self.labels))
if self.verbosity > 0:
print(np.unique(self.labels))
self.tfidf = TfidfVectorizer(analyzer='word', ngram_range=(1, 1), stop_words='english')
self.tfidf_matrix = self.tfidf.fit_transform(self.names)
if verbosity > 1:
print(self.tfidf.get_feature_names())
self.train_tfidf, self.test_tfidf, self.train_labels, self.test_labels = train_test_split(
self.tfidf_matrix, self.label_ints, test_size=.25)
# alpha: learning rate (default 1e-4, but other TFIDF classifier examples use 1e-5 to 1e-6)
# penalty: 'none', 'l2', 'l1', or 'elasticnet' # regularization penalty on the feature weights
self.svn_matrix = self.fit(self.train_tfidf, self.train_labels)
if verbosity > 0:
print(self.score(self.train_tfidf, self.train_labels))
# Typically > 98% recall (accuracy on training set)
def predict_pac_type(self, name):
name = str(name)
vec = self.tfidf.transform([name])  # transform expects an iterable of documents, not a bare string
predicted_label = self.predict(vec)
print(predicted_label)
return predicted_label
def similarity(self, name1, name2):
# tfidf is already normalized, so no need to divide by the norm of each vector?
vec1, vec2 = self.tfidf.transform(np.array([name1, name2]))
# cosine distance between two tfidf vectors
return vec1.dot(vec2.T)[0, 0]
def similarity_matrix(self):
return self.tfidf_matrix * self.tfidf_matrix.T
| mit |
hlin117/statsmodels | statsmodels/tools/tests/test_tools.py | 26 | 18818 | """
Test functions for models.tools
"""
from statsmodels.compat.python import lrange, range
import numpy as np
from numpy.random import standard_normal
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_string_equal, TestCase)
from nose.tools import (assert_true, assert_false, assert_raises)
from statsmodels.datasets import longley
from statsmodels.tools import tools
from statsmodels.tools.tools import pinv_extended
from statsmodels.compat.numpy import np_matrix_rank
class TestTools(TestCase):
def test_add_constant_list(self):
x = lrange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_1d(self):
x = np.arange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_has_constant1d(self):
x = np.ones(5)
x = tools.add_constant(x, has_constant='skip')
assert_equal(x, np.ones(5))
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.ones((5, 2)))
def test_add_constant_has_constant2d(self):
x = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
y = tools.add_constant(x, has_constant='skip')
assert_equal(x, y)
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.column_stack((np.ones(4), x)))
def test_recipr(self):
X = np.array([[2,1],[-1,0]])
Y = tools.recipr(X)
assert_almost_equal(Y, np.array([[0.5,1],[0,0]]))
def test_recipr0(self):
X = np.array([[2,1],[-4,0]])
Y = tools.recipr0(X)
assert_almost_equal(Y, np.array([[0.5,1],[-0.25,0]]))
def test_rank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
self.assertEquals(tools.rank(X), np_matrix_rank(X))
X[:,0] = X[:,1] + X[:,2]
self.assertEquals(tools.rank(X), np_matrix_rank(X))
def test_extendedpinv(self):
X = standard_normal((40, 10))
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_extendedpinv_singular(self):
X = standard_normal((40, 10))
X[:, 5] = X[:, 1] + X[:, 3]
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_fullrank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
X[:,0] = X[:,1] + X[:,2]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,9))
self.assertEquals(tools.rank(Y), 9)
X[:,5] = X[:,3] + X[:,4]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,8))
warnings.simplefilter("ignore")
self.assertEquals(tools.rank(Y), 8)
def test_estimable():
rng = np.random.RandomState(20120713)
N, P = (40, 10)
X = rng.normal(size=(N, P))
C = rng.normal(size=(1, P))
isestimable = tools.isestimable
assert_true(isestimable(C, X))
assert_true(isestimable(np.eye(P), X))
for row in np.eye(P):
assert_true(isestimable(row, X))
X = np.ones((40, 2))
assert_true(isestimable([1, 1], X))
assert_false(isestimable([1, 0], X))
assert_false(isestimable([0, 1], X))
assert_false(isestimable(np.eye(2), X))
halfX = rng.normal(size=(N, 5))
X = np.hstack([halfX, halfX])
assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
# Test array-like for design
XL = X.tolist()
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
# Test ValueError for incorrect number of columns
X = rng.normal(size=(N, 5))
for n in range(1, 4):
assert_raises(ValueError, isestimable, np.ones((n,)), X)
assert_raises(ValueError, isestimable, np.eye(4), X)
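# Hedged illustration (added; not statsmodels' implementation): a contrast C is
# estimable for a design X exactly when every row of C lies in the row space of
# X. Projecting C onto that row space and comparing is one way to check this,
# which is the property the estimability tests above exercise.
def _example_estimability_check():
    import numpy as np
    rng = np.random.RandomState(0)
    half = rng.normal(size=(40, 5))
    X = np.hstack([half, half])               # rank-deficient design
    C = np.hstack([np.eye(5), np.eye(5)])     # estimable contrast
    row_space_proj = np.linalg.pinv(X).dot(X)
    return np.allclose(C.dot(row_space_proj), C)   # True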
class TestCategoricalNumerical(object):
#TODO: use assert_raises to check that bad inputs are taken care of
def __init__(self):
#import string
stringabc = 'abcdefghijklmnopqrstuvwxy'
self.des = np.random.randn(25,2)
self.instr = np.floor(np.arange(10,60, step=2)/10)
x=np.zeros((25,5))
x[:5,0]=1
x[5:10,1]=1
x[10:15,2]=1
x[15:20,3]=1
x[20:25,4]=1
self.dummy = x
structdes = np.zeros((25,1),dtype=[('var1', 'f4'),('var2', 'f4'),
('instrument','f4'),('str_instr','a10')])
structdes['var1'] = self.des[:,0][:,None]
structdes['var2'] = self.des[:,1][:,None]
structdes['instrument'] = self.instr[:,None]
string_var = [stringabc[0:5], stringabc[5:10],
stringabc[10:15], stringabc[15:20],
stringabc[20:25]]
string_var *= 5
self.string_var = np.array(sorted(string_var))
structdes['str_instr'] = self.string_var[:,None]
self.structdes = structdes
self.recdes = structdes.view(np.recarray)
def test_array2d(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],10)
def test_array1d(self):
des = tools.categorical(self.instr)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],6)
def test_array2d_drop(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2, drop=True)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.instr, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='instrument')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='instrument')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
# def test_arraylike2d(self):
# des = tools.categorical(self.structdes.tolist(), col=2)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 9)
# def test_arraylike1d(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr)
# test_dum = dum[:,-5:]
# assert_array_equal(test_dum, self.dummy)
# assert_equal(dum.shape[1], 6)
# def test_arraylike2d_drop(self):
# des = tools.categorical(self.structdes.tolist(), col=2, drop=True)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 8)
# def test_arraylike1d_drop(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr, drop=True)
# assert_array_equal(dum, self.dummy)
# assert_equal(dum.shape[1], 5)
class TestCategoricalString(TestCategoricalNumerical):
# comment out until we have type coercion
# def test_array2d(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],10)
# def test_array1d(self):
# des = tools.categorical(self.instr)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],6)
# def test_array2d_drop(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2, drop=True)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.string_var, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='str_instr')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='str_instr')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_arraylike2d(self):
pass
def test_arraylike1d(self):
pass
def test_arraylike2d_drop(self):
pass
def test_arraylike1d_drop(self):
pass
def test_rec_issue302():
arr = np.rec.fromrecords([[10], [11]], names='group')
actual = tools.categorical(arr)
expected = np.rec.array([(10, 1.0, 0.0), (11, 0.0, 1.0)],
dtype=[('group', int), ('group_10', float), ('group_11', float)])
assert_array_equal(actual, expected)
def test_issue302():
arr = np.rec.fromrecords([[10, 12], [11, 13]], names=['group', 'whatever'])
actual = tools.categorical(arr, col=['group'])
expected = np.rec.array([(10, 12, 1.0, 0.0), (11, 13, 0.0, 1.0)],
dtype=[('group', int), ('whatever', int), ('group_10', float),
('group_11', float)])
assert_array_equal(actual, expected)
def test_pandas_const_series():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=False)
assert_string_equal('const', series.columns[1])
assert_equal(series.var(0)[1], 0)
def test_pandas_const_series_prepend():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=True)
assert_string_equal('const', series.columns[0])
assert_equal(series.var(0)[0], 0)
def test_pandas_const_df():
dta = longley.load_pandas().exog
dta = tools.add_constant(dta, prepend=False)
assert_string_equal('const', dta.columns[-1])
assert_equal(dta.var(0)[-1], 0)
def test_pandas_const_df_prepend():
dta = longley.load_pandas().exog
# regression test for #1025
dta['UNEMP'] /= dta['UNEMP'].std()
dta = tools.add_constant(dta, prepend=True)
assert_string_equal('const', dta.columns[0])
assert_equal(dta.var(0)[0], 0)
def test_chain_dot():
A = np.arange(1,13).reshape(3,4)
B = np.arange(3,15).reshape(4,3)
C = np.arange(5,8).reshape(3,1)
assert_equal(tools.chain_dot(A,B,C), np.array([[1820],[4300],[6780]]))
class TestNanDot(object):
@classmethod
def setupClass(cls):
nan = np.nan
cls.mx_1 = np.array([[nan, 1.], [2., 3.]])
cls.mx_2 = np.array([[nan, nan], [2., 3.]])
cls.mx_3 = np.array([[0., 0.], [0., 0.]])
cls.mx_4 = np.array([[1., 0.], [1., 0.]])
cls.mx_5 = np.array([[0., 1.], [0., 1.]])
cls.mx_6 = np.array([[1., 2.], [3., 4.]])
def test_11(self):
test_res = tools.nan_dot(self.mx_1, self.mx_1)
expected_res = np.array([[ np.nan, np.nan], [ np.nan, 11.]])
assert_array_equal(test_res, expected_res)
def test_12(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_2)
expected_res = np.array([[ nan, nan], [ nan, nan]])
assert_array_equal(test_res, expected_res)
def test_13(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_14(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_41(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_4, self.mx_1)
expected_res = np.array([[ nan, 1.], [ nan, 1.]])
assert_array_equal(test_res, expected_res)
def test_23(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_32(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_3, self.mx_2)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_24(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_25(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_5)
expected_res = np.array([[ 0., nan], [ 0., 5.]])
assert_array_equal(test_res, expected_res)
def test_66(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_6, self.mx_6)
expected_res = np.array([[ 7., 10.], [ 15., 22.]])
assert_array_equal(test_res, expected_res)
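# Hedged sketch (added; an assumption rather than statsmodels' actual nan_dot):
# one implementation consistent with the expected values above treats 0 * nan
# as 0 and marks an output cell nan only where a nan in one factor meets a
# non-zero entry in the other.
def _example_nan_dot(a, b):
    import numpy as np
    nan_mask = (np.dot(np.isnan(a), (b != 0).astype(float)) +
                np.dot((a != 0).astype(float), np.isnan(b))) > 0
    out = np.dot(np.nan_to_num(a), np.nan_to_num(b))
    out[nan_mask] = np.nan
    return out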
| bsd-3-clause |
ahellander/pyurdme | examples/yeast_polarization/G_protein_cycle.py | 5 | 4520 | #!/usr/bin/env python
""" pyURDME model file for the polarization 1D example. """
import os
import sys
import pyurdme
import dolfin
import math
import matplotlib.pyplot as plt
import numpy
# Sub domain for Periodic boundary condition
class PeriodicBoundary1D(dolfin.SubDomain):
def __init__(self, a=0.0, b=1.0):
""" 1D domain from a to b. """
dolfin.SubDomain.__init__(self)
self.a = a
self.b = b
def inside(self, x, on_boundary):
return not bool((dolfin.near(x[0], self.b)) and on_boundary)
def map(self, x, y):
if dolfin.near(x[0], self.b):
y[0] = self.a + (x[0] - self.b)
class PheromoneGradient(pyurdme.URDMEDataFunction):
def __init__(self, a=0.0, b=1.0, L_min=0, L_max=4, MOLAR=1.0):
""" 1D domain from a to b. """
pyurdme.URDMEDataFunction.__init__(self, name="PheromoneGradient")
self.a = a
self.b = b
self.L_min = L_min
self.L_max = L_max
self.MOLAR = MOLAR
def map(self, x):
ret = ((self.L_max - self.L_min) * 0.5 * (1 + math.cos(0.5*x[0])) + self.L_min) * self.MOLAR
return ret
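# Hedged sanity check of the PheromoneGradient formula above (added for
# illustration only): at x = 0 the cosine term equals 1, giving L_max*MOLAR,
# while at the domain edges x = +/-2*pi it equals -1, giving L_min*MOLAR.
def _example_pheromone_gradient_endpoints(L_min=0.0, L_max=4.0, MOLAR=1.0):
    center = ((L_max - L_min) * 0.5 * (1 + math.cos(0.5 * 0.0)) + L_min) * MOLAR
    edge = ((L_max - L_min) * 0.5 * (1 + math.cos(0.5 * 2 * 3.14159)) + L_min) * MOLAR
    return center, edge   # approximately (L_max*MOLAR, L_min*MOLAR)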
class G_protein_cycle_1D(pyurdme.URDMEModel):
def __init__(self,model_name="G_protein_cycle_1D"):
pyurdme.URDMEModel.__init__(self,model_name)
# Species
# R RL G Ga Gbg Gd
R = pyurdme.Species(name="R", diffusion_constant=0.01)
RL = pyurdme.Species(name="RL", diffusion_constant=0.01)
G = pyurdme.Species(name="G", diffusion_constant=0.01)
Ga = pyurdme.Species(name="Ga", diffusion_constant=0.01)
Gbg = pyurdme.Species(name="Gbg",diffusion_constant=0.01)
Gd = pyurdme.Species(name="Gd", diffusion_constant=0.01)
self.add_species([R,RL,G,Ga,Gbg,Gd])
L = 4*3.14159
NUM_VOXEL = 200
MOLAR=6.02e-01*((L/NUM_VOXEL)**3)
self.mesh = pyurdme.URDMEMesh.generate_interval_mesh(nx=NUM_VOXEL, a=-2*3.14159, b=2*3.14159, periodic=True)
SA = pyurdme.Parameter(name="SA" ,expression=201.056)
V = pyurdme.Parameter(name="V" ,expression=33.5)
k_RL = pyurdme.Parameter(name="k_RL" ,expression=2e-03/MOLAR)
k_RLm = pyurdme.Parameter(name="k_RLm" ,expression=1e-02)
k_Rs = pyurdme.Parameter(name="k_Rs" ,expression="4.0/SA")
k_Rd0 = pyurdme.Parameter(name="k_Rd0" ,expression=4e-04)
k_Rd1 = pyurdme.Parameter(name="k_Rd1" ,expression=4e-04)
k_G1 = pyurdme.Parameter(name="k_G1" ,expression="1.0*SA")
k_Ga = pyurdme.Parameter(name="k_Ga" ,expression="1e-06*SA")
k_Gd = pyurdme.Parameter(name="k_Gd" ,expression=0.1)
self.add_parameter([SA,V,k_RL,k_RLm,k_Rs,k_Rd0,k_Rd1,k_G1,k_Ga,k_Gd])
# Add Data Function to model the mating pheromone gradient.
self.add_data_function(PheromoneGradient(a=-2*3.14159, b=2*3.14159, MOLAR=MOLAR))
# Reactions
R0 = pyurdme.Reaction(name="R0", reactants={}, products={R:1}, massaction=True, rate=k_Rs)
R1 = pyurdme.Reaction(name="R1", reactants={R:1}, products={}, massaction=True, rate=k_Rd0)
R2 = pyurdme.Reaction(name="R2", reactants={R:1}, products={RL:1}, propensity_function="k_RL*R*PheromoneGradient/vol")
R3 = pyurdme.Reaction(name="R3", reactants={RL:1}, products={R:1}, massaction=True, rate=k_RLm)
R4 = pyurdme.Reaction(name="R4", reactants={RL:1}, products={}, massaction=True, rate=k_RLm)
R5 = pyurdme.Reaction(name="R5", reactants={G:1}, products={Ga:1, Gbg:1}, propensity_function="k_Ga*RL*G/vol")
R6 = pyurdme.Reaction(name="R6", reactants={Ga:1}, products={Gd:1}, massaction=True, rate=k_Ga)
R7 = pyurdme.Reaction(name="R7", reactants={Gd:1, Gbg:1}, products={G:1}, massaction=True, rate=k_G1)
self.add_reaction([R0,R1,R2,R3,R4,R5,R6,R7])
# Distribute molecules randomly over the mesh according to their initial values
self.set_initial_condition_scatter({R:10000})
self.set_initial_condition_scatter({G:10000})
self.timespan(range(201))
if __name__=="__main__":
""" Dump model to a file. """
model = G_protein_cycle_1D()
result = model.run()
x_vals = model.mesh.coordinates()[:, 0]
G = result.get_species("G", timepoints=49)
Gbg = result.get_species("Gbg", timepoints=49)
plt.plot(x_vals, Gbg)
plt.title('Gbg at t=49')
plt.xlabel('Space')
plt.ylabel('Number of Molecules')
plt.show()
| gpl-3.0 |
wooga/airflow | tests/providers/apache/hive/hooks/test_hive.py | 1 | 34850 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import os
import unittest
from collections import OrderedDict, namedtuple
from unittest import mock
import pandas as pd
from hmsclient import HMSClient
from airflow.exceptions import AirflowException
from airflow.models.connection import Connection
from airflow.models.dag import DAG
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook, HiveServer2Hook
from airflow.secrets.environment_variables import CONN_ENV_PREFIX
from airflow.utils import timezone
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
from tests.test_utils.asserts import assert_equal_ignore_multiple_spaces
from tests.test_utils.mock_hooks import MockHiveCliHook, MockHiveServer2Hook
from tests.test_utils.mock_process import MockSubProcess
DEFAULT_DATE = timezone.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
class TestHiveEnvironment(unittest.TestCase):
def setUp(self):
self.next_day = (DEFAULT_DATE +
datetime.timedelta(days=1)).isoformat()[:10]
self.database = 'airflow'
self.partition_by = 'ds'
self.table = 'static_babynames_partitioned'
with mock.patch('airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_metastore_client'
) as get_metastore_mock:
get_metastore_mock.return_value = mock.MagicMock()
self.hook = HiveMetastoreHook()
class TestHiveCliHook(unittest.TestCase):
@mock.patch('tempfile.tempdir', '/tmp/')
@mock.patch('tempfile._RandomNameSequence.__next__')
@mock.patch('subprocess.Popen')
def test_run_cli(self, mock_popen, mock_temp_dir):
mock_subprocess = MockSubProcess()
mock_popen.return_value = mock_subprocess
mock_temp_dir.return_value = "test_run_cli"
with mock.patch.dict('os.environ', {
'AIRFLOW_CTX_DAG_ID': 'test_dag_id',
'AIRFLOW_CTX_TASK_ID': 'test_task_id',
'AIRFLOW_CTX_EXECUTION_DATE': '2015-01-01T00:00:00+00:00',
'AIRFLOW_CTX_DAG_RUN_ID': '55',
'AIRFLOW_CTX_DAG_OWNER': 'airflow',
'AIRFLOW_CTX_DAG_EMAIL': '[email protected]',
}):
hook = MockHiveCliHook()
hook.run_cli("SHOW DATABASES")
hive_cmd = ['beeline', '-u', '"jdbc:hive2://localhost:10000/default"', '-hiveconf',
'airflow.ctx.dag_id=test_dag_id', '-hiveconf', 'airflow.ctx.task_id=test_task_id',
'-hiveconf', 'airflow.ctx.execution_date=2015-01-01T00:00:00+00:00', '-hiveconf',
'airflow.ctx.dag_run_id=55', '-hiveconf', 'airflow.ctx.dag_owner=airflow',
'-hiveconf', '[email protected]', '-hiveconf',
'mapreduce.job.queuename=airflow', '-hiveconf', 'mapred.job.queue.name=airflow',
'-hiveconf', 'tez.queue.name=airflow', '-f',
'/tmp/airflow_hiveop_test_run_cli/tmptest_run_cli']
mock_popen.assert_called_with(
hive_cmd,
stdout=mock_subprocess.PIPE,
stderr=mock_subprocess.STDOUT,
cwd="/tmp/airflow_hiveop_test_run_cli",
close_fds=True
)
@mock.patch('subprocess.Popen')
def test_run_cli_with_hive_conf(self, mock_popen):
hql = "set key;\n" \
"set airflow.ctx.dag_id;\nset airflow.ctx.dag_run_id;\n" \
"set airflow.ctx.task_id;\nset airflow.ctx.execution_date;\n"
dag_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']
task_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']
execution_date_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
'env_var_format']
dag_run_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
'env_var_format']
mock_output = ['Connecting to jdbc:hive2://localhost:10000/default',
'log4j:WARN No appenders could be found for logger (org.apache.hive.jdbc.Utils).',
'log4j:WARN Please initialize the log4j system properly.',
'log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.',
'Connected to: Apache Hive (version 1.2.1.2.3.2.0-2950)',
'Driver: Hive JDBC (version 1.2.1.spark2)',
'Transaction isolation: TRANSACTION_REPEATABLE_READ',
'0: jdbc:hive2://localhost:10000/default> USE default;',
'No rows affected (0.37 seconds)',
'0: jdbc:hive2://localhost:10000/default> set key;',
'+------------+--+',
'| set |',
'+------------+--+',
'| key=value |',
'+------------+--+',
'1 row selected (0.133 seconds)',
'0: jdbc:hive2://localhost:10000/default> set airflow.ctx.dag_id;',
'+---------------------------------+--+',
'| set |',
'+---------------------------------+--+',
'| airflow.ctx.dag_id=test_dag_id |',
'+---------------------------------+--+',
'1 row selected (0.008 seconds)',
'0: jdbc:hive2://localhost:10000/default> set airflow.ctx.dag_run_id;',
'+-----------------------------------------+--+',
'| set |',
'+-----------------------------------------+--+',
'| airflow.ctx.dag_run_id=test_dag_run_id |',
'+-----------------------------------------+--+',
'1 row selected (0.007 seconds)',
'0: jdbc:hive2://localhost:10000/default> set airflow.ctx.task_id;',
'+-----------------------------------+--+',
'| set |',
'+-----------------------------------+--+',
'| airflow.ctx.task_id=test_task_id |',
'+-----------------------------------+--+',
'1 row selected (0.009 seconds)',
'0: jdbc:hive2://localhost:10000/default> set airflow.ctx.execution_date;',
'+-------------------------------------------------+--+',
'| set |',
'+-------------------------------------------------+--+',
'| airflow.ctx.execution_date=test_execution_date |',
'+-------------------------------------------------+--+',
'1 row selected (0.006 seconds)',
'0: jdbc:hive2://localhost:10000/default> ',
'0: jdbc:hive2://localhost:10000/default> ',
'Closing: 0: jdbc:hive2://localhost:10000/default',
'']
with mock.patch.dict('os.environ', {
dag_id_ctx_var_name: 'test_dag_id',
task_id_ctx_var_name: 'test_task_id',
execution_date_ctx_var_name: 'test_execution_date',
dag_run_id_ctx_var_name: 'test_dag_run_id',
}):
hook = MockHiveCliHook()
mock_popen.return_value = MockSubProcess(output=mock_output)
output = hook.run_cli(hql=hql, hive_conf={'key': 'value'})
process_inputs = " ".join(mock_popen.call_args_list[0][0][0])
self.assertIn('value', process_inputs)
self.assertIn('test_dag_id', process_inputs)
self.assertIn('test_task_id', process_inputs)
self.assertIn('test_execution_date', process_inputs)
self.assertIn('test_dag_run_id', process_inputs)
self.assertIn('value', output)
self.assertIn('test_dag_id', output)
self.assertIn('test_task_id', output)
self.assertIn('test_execution_date', output)
self.assertIn('test_dag_run_id', output)
@mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.run_cli')
def test_load_file_without_create_table(self, mock_run_cli):
filepath = "/path/to/input/file"
table = "output_table"
hook = MockHiveCliHook()
hook.load_file(filepath=filepath, table=table, create=False)
query = (
"LOAD DATA LOCAL INPATH '{filepath}' "
"OVERWRITE INTO TABLE {table} ;\n"
.format(filepath=filepath, table=table)
)
calls = [
mock.call(query)
]
mock_run_cli.assert_has_calls(calls, any_order=True)
@mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.run_cli')
def test_load_file_create_table(self, mock_run_cli):
filepath = "/path/to/input/file"
table = "output_table"
field_dict = OrderedDict([("name", "string"), ("gender", "string")])
fields = ",\n ".join(
['`{k}` {v}'.format(k=k.strip('`'), v=v) for k, v in field_dict.items()])
hook = MockHiveCliHook()
hook.load_file(filepath=filepath, table=table,
field_dict=field_dict, create=True, recreate=True)
create_table = (
"DROP TABLE IF EXISTS {table};\n"
"CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
"ROW FORMAT DELIMITED\n"
"FIELDS TERMINATED BY ','\n"
"STORED AS textfile\n;".format(table=table, fields=fields)
)
load_data = (
"LOAD DATA LOCAL INPATH '{filepath}' "
"OVERWRITE INTO TABLE {table} ;\n"
.format(filepath=filepath, table=table)
)
calls = [
mock.call(create_table),
mock.call(load_data)
]
mock_run_cli.assert_has_calls(calls, any_order=True)
@mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.load_file')
@mock.patch('pandas.DataFrame.to_csv')
def test_load_df(self, mock_to_csv, mock_load_file):
df = pd.DataFrame({"c": ["foo", "bar", "baz"]})
table = "t"
delimiter = ","
encoding = "utf-8"
hook = MockHiveCliHook()
hook.load_df(df=df,
table=table,
delimiter=delimiter,
encoding=encoding)
assert mock_to_csv.call_count == 1
kwargs = mock_to_csv.call_args[1]
self.assertEqual(kwargs["header"], False)
self.assertEqual(kwargs["index"], False)
self.assertEqual(kwargs["sep"], delimiter)
assert mock_load_file.call_count == 1
kwargs = mock_load_file.call_args[1]
self.assertEqual(kwargs["delimiter"], delimiter)
self.assertEqual(kwargs["field_dict"], {"c": "STRING"})
self.assertTrue(isinstance(kwargs["field_dict"], OrderedDict))
self.assertEqual(kwargs["table"], table)
@mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.load_file')
@mock.patch('pandas.DataFrame.to_csv')
def test_load_df_with_optional_parameters(self, mock_to_csv, mock_load_file):
hook = MockHiveCliHook()
bools = (True, False)
for create, recreate in itertools.product(bools, bools):
mock_load_file.reset_mock()
hook.load_df(df=pd.DataFrame({"c": range(0, 10)}),
table="t",
create=create,
recreate=recreate)
assert mock_load_file.call_count == 1
kwargs = mock_load_file.call_args[1]
self.assertEqual(kwargs["create"], create)
self.assertEqual(kwargs["recreate"], recreate)
@mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.run_cli')
def test_load_df_with_data_types(self, mock_run_cli):
ord_dict = OrderedDict()
ord_dict['b'] = [True]
ord_dict['i'] = [-1]
ord_dict['t'] = [1]
ord_dict['f'] = [0.0]
ord_dict['c'] = ['c']
ord_dict['M'] = [datetime.datetime(2018, 1, 1)]
ord_dict['O'] = [object()]
ord_dict['S'] = [b'STRING']
ord_dict['U'] = ['STRING']
ord_dict['V'] = [None]
df = pd.DataFrame(ord_dict)
hook = MockHiveCliHook()
hook.load_df(df, 't')
query = """
CREATE TABLE IF NOT EXISTS t (
`b` BOOLEAN,
`i` BIGINT,
`t` BIGINT,
`f` DOUBLE,
`c` STRING,
`M` TIMESTAMP,
`O` STRING,
`S` STRING,
`U` STRING,
`V` STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS textfile
;
"""
assert_equal_ignore_multiple_spaces(
self, mock_run_cli.call_args_list[0][0][0], query)
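# Hedged illustration (added; inferred from the expected DDL in the test above,
# not taken from HiveCliHook's source): a pandas dtype "kind" code could be
# mapped to a Hive column type roughly as follows before building CREATE TABLE.
def _example_dtype_kind_to_hive_type(kind):
    mapping = {
        'b': 'BOOLEAN',    # bool
        'i': 'BIGINT',     # signed integer
        'u': 'BIGINT',     # unsigned integer
        'f': 'DOUBLE',     # floating point
        'M': 'TIMESTAMP',  # datetime64
    }
    return mapping.get(kind, 'STRING')  # object, str, bytes and anything else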
class TestHiveMetastoreHook(TestHiveEnvironment):
VALID_FILTER_MAP = {'key2': 'value2'}
def test_get_max_partition_from_empty_part_specs(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs([],
'key1',
self.VALID_FILTER_MAP)
self.assertIsNone(max_partition)
# @mock.patch('airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook', 'get_metastore_client')
def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
{'key3': 'value5'})
def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key3',
self.VALID_FILTER_MAP)
def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
None,
self.VALID_FILTER_MAP)
def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
None)
# No partition will be filtered out.
self.assertEqual(max_partition, b'value3')
def test_get_max_partition_from_valid_part_specs(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
self.VALID_FILTER_MAP)
self.assertEqual(max_partition, b'value1')
@mock.patch("airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_connection",
return_value=[Connection(host="localhost", port="9802")])
@mock.patch("airflow.providers.apache.hive.hooks.hive.socket")
def test_error_metastore_client(self, socket_mock, _find_valid_server_mock):
socket_mock.socket.return_value.connect_ex.return_value = 0
self.hook.get_metastore_client()
def test_get_conn(self):
with mock.patch('airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook._find_valid_server'
) as find_valid_server:
find_valid_server.return_value = mock.MagicMock(return_value={})
metastore_hook = HiveMetastoreHook()
self.assertIsInstance(metastore_hook.get_conn(), HMSClient)
def test_check_for_partition(self):
# Check for existent partition.
FakePartition = namedtuple('FakePartition', ['values'])
fake_partition = FakePartition(['2015-01-01'])
metastore = self.hook.metastore.__enter__()
partition = "{p_by}='{date}'".format(date=DEFAULT_DATE_DS,
p_by=self.partition_by)
metastore.get_partitions_by_filter = mock.MagicMock(
return_value=[fake_partition])
self.assertTrue(
self.hook.check_for_partition(self.database, self.table,
partition)
)
metastore.get_partitions_by_filter(
self.database, self.table, partition, 1)
# Check for non-existent partition.
missing_partition = "{p_by}='{date}'".format(date=self.next_day,
p_by=self.partition_by)
metastore.get_partitions_by_filter = mock.MagicMock(return_value=[])
self.assertFalse(
self.hook.check_for_partition(self.database, self.table,
missing_partition)
)
metastore.get_partitions_by_filter.assert_called_with(
self.database, self.table, missing_partition, 1)
def test_check_for_named_partition(self):
# Check for existing partition.
partition = "{p_by}={date}".format(date=DEFAULT_DATE_DS,
p_by=self.partition_by)
self.hook.metastore.__enter__(
).check_for_named_partition = mock.MagicMock(return_value=True)
self.assertTrue(
self.hook.check_for_named_partition(self.database,
self.table,
partition))
self.hook.metastore.__enter__().check_for_named_partition.assert_called_with(
self.database, self.table, partition)
# Check for non-existent partition
missing_partition = "{p_by}={date}".format(date=self.next_day,
p_by=self.partition_by)
self.hook.metastore.__enter__().check_for_named_partition = mock.MagicMock(
return_value=False)
self.assertFalse(
self.hook.check_for_named_partition(self.database,
self.table,
missing_partition)
)
self.hook.metastore.__enter__().check_for_named_partition.assert_called_with(
self.database, self.table, missing_partition)
def test_get_table(self):
self.hook.metastore.__enter__().get_table = mock.MagicMock()
self.hook.get_table(db=self.database, table_name=self.table)
self.hook.metastore.__enter__().get_table.assert_called_with(
dbname=self.database, tbl_name=self.table)
def test_get_tables(self): # static_babynames_partitioned
self.hook.metastore.__enter__().get_tables = mock.MagicMock(
return_value=['static_babynames_partitioned'])
self.hook.get_tables(db=self.database, pattern=self.table + "*")
self.hook.metastore.__enter__().get_tables.assert_called_with(
db_name='airflow', pattern='static_babynames_partitioned*')
self.hook.metastore.__enter__().get_table_objects_by_name.assert_called_with(
'airflow', ['static_babynames_partitioned'])
def test_get_databases(self):
metastore = self.hook.metastore.__enter__()
metastore.get_databases = mock.MagicMock()
self.hook.get_databases(pattern='*')
metastore.get_databases.assert_called_with('*')
def test_get_partitions(self):
FakeFieldSchema = namedtuple('FakeFieldSchema', ['name'])
fake_schema = FakeFieldSchema('ds')
FakeTable = namedtuple('FakeTable', ['partitionKeys'])
fake_table = FakeTable([fake_schema])
FakePartition = namedtuple('FakePartition', ['values'])
fake_partition = FakePartition(['2015-01-01'])
metastore = self.hook.metastore.__enter__()
metastore.get_table = mock.MagicMock(return_value=fake_table)
metastore.get_partitions = mock.MagicMock(
return_value=[fake_partition])
partitions = self.hook.get_partitions(schema=self.database,
table_name=self.table)
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions, [{self.partition_by: DEFAULT_DATE_DS}])
metastore.get_table.assert_called_with(
dbname=self.database, tbl_name=self.table)
metastore.get_partitions.assert_called_with(
db_name=self.database, tbl_name=self.table, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
def test_max_partition(self):
FakeFieldSchema = namedtuple('FakeFieldSchema', ['name'])
fake_schema = FakeFieldSchema('ds')
FakeTable = namedtuple('FakeTable', ['partitionKeys'])
fake_table = FakeTable([fake_schema])
metastore = self.hook.metastore.__enter__()
metastore.get_table = mock.MagicMock(return_value=fake_table)
metastore.get_partition_names = mock.MagicMock(
return_value=['ds=2015-01-01'])
metastore.partition_name_to_spec = mock.MagicMock(
return_value={'ds': '2015-01-01'})
filter_map = {self.partition_by: DEFAULT_DATE_DS}
partition = self.hook.max_partition(schema=self.database,
table_name=self.table,
field=self.partition_by,
filter_map=filter_map)
self.assertEqual(partition, DEFAULT_DATE_DS.encode('utf-8'))
metastore.get_table.assert_called_with(
dbname=self.database, tbl_name=self.table)
metastore.get_partition_names.assert_called_with(
self.database, self.table, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
metastore.partition_name_to_spec.assert_called_with('ds=2015-01-01')
def test_table_exists(self):
# Test with existent table.
self.hook.metastore.__enter__().get_table = mock.MagicMock(return_value=True)
self.assertTrue(self.hook.table_exists(self.table, db=self.database))
self.hook.metastore.__enter__().get_table.assert_called_with(
dbname='airflow', tbl_name='static_babynames_partitioned')
# Test with non-existent table.
self.hook.metastore.__enter__().get_table = mock.MagicMock(side_effect=Exception())
self.assertFalse(
self.hook.table_exists("does-not-exist")
)
self.hook.metastore.__enter__().get_table.assert_called_with(
dbname='default', tbl_name='does-not-exist')
class TestHiveServer2Hook(unittest.TestCase):
def _upload_dataframe(self):
df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]})
self.local_path = '/tmp/TestHiveServer2Hook.csv'
df.to_csv(self.local_path, header=False, index=False)
def setUp(self):
self._upload_dataframe()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG('test_dag_id', default_args=args)
self.database = 'airflow'
self.table = 'hive_server_hook'
self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
a int,
b int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ',';
LOAD DATA LOCAL INPATH '{{ params.csv_path }}'
OVERWRITE INTO TABLE {{ params.table }};
"""
self.columns = ['{}.a'.format(self.table),
'{}.b'.format(self.table)]
with mock.patch('airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_metastore_client'
) as get_metastore_mock:
get_metastore_mock.return_value = mock.MagicMock()
self.hook = HiveMetastoreHook()
def test_get_conn(self):
hook = MockHiveServer2Hook()
hook.get_conn()
@mock.patch('pyhive.hive.connect')
def test_get_conn_with_password(self, mock_connect):
conn_id = "conn_with_password"
conn_env = CONN_ENV_PREFIX + conn_id.upper()
with mock.patch.dict(
'os.environ',
{conn_env: "jdbc+hive2://conn_id:conn_pass@localhost:10000/default?authMechanism=LDAP"}
):
HiveServer2Hook(hiveserver2_conn_id=conn_id).get_conn()
mock_connect.assert_called_once_with(
host='localhost',
port=10000,
auth='LDAP',
kerberos_service_name=None,
username='conn_id',
password='conn_pass',
database='default')
def test_get_records(self):
hook = MockHiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
with mock.patch.dict('os.environ', {
'AIRFLOW_CTX_DAG_ID': 'test_dag_id',
'AIRFLOW_CTX_TASK_ID': 'HiveHook_3835',
'AIRFLOW_CTX_EXECUTION_DATE': '2015-01-01T00:00:00+00:00',
'AIRFLOW_CTX_DAG_RUN_ID': '55',
'AIRFLOW_CTX_DAG_OWNER': 'airflow',
'AIRFLOW_CTX_DAG_EMAIL': '[email protected]',
}):
results = hook.get_records(query, schema=self.database)
self.assertListEqual(results, [(1, 1), (2, 2)])
hook.get_conn.assert_called_with(self.database)
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.dag_id=test_dag_id')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.task_id=HiveHook_3835')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.execution_date=2015-01-01T00:00:00+00:00')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.dag_run_id=55')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.dag_owner=airflow')
hook.mock_cursor.execute.assert_any_call(
'set [email protected]')
def test_get_pandas_df(self):
hook = MockHiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
with mock.patch.dict('os.environ', {
'AIRFLOW_CTX_DAG_ID': 'test_dag_id',
'AIRFLOW_CTX_TASK_ID': 'HiveHook_3835',
'AIRFLOW_CTX_EXECUTION_DATE': '2015-01-01T00:00:00+00:00',
'AIRFLOW_CTX_DAG_RUN_ID': '55',
'AIRFLOW_CTX_DAG_OWNER': 'airflow',
'AIRFLOW_CTX_DAG_EMAIL': '[email protected]',
}):
df = hook.get_pandas_df(query, schema=self.database)
self.assertEqual(len(df), 2)
self.assertListEqual(df["hive_server_hook.a"].values.tolist(), [1, 2])
hook.get_conn.assert_called_with(self.database)
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.dag_id=test_dag_id')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.task_id=HiveHook_3835')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.execution_date=2015-01-01T00:00:00+00:00')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.dag_run_id=55')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.dag_owner=airflow')
hook.mock_cursor.execute.assert_any_call(
'set [email protected]')
def test_get_results_header(self):
hook = MockHiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_results(query, schema=self.database)
self.assertListEqual([col[0] for col in results['header']],
self.columns)
def test_get_results_data(self):
hook = MockHiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_results(query, schema=self.database)
self.assertListEqual(results['data'], [(1, 1), (2, 2)])
def test_to_csv(self):
hook = MockHiveServer2Hook()
hook._get_results = mock.MagicMock(return_value=iter([
[
('hive_server_hook.a', 'INT_TYPE', None, None, None, None, True),
('hive_server_hook.b', 'INT_TYPE', None, None, None, None, True)
], (1, 1), (2, 2)
]))
query = "SELECT * FROM {}".format(self.table)
csv_filepath = 'query_results.csv'
hook.to_csv(query, csv_filepath, schema=self.database,
delimiter=',', lineterminator='\n', output_header=True, fetch_size=2)
df = pd.read_csv(csv_filepath, sep=',')
self.assertListEqual(df.columns.tolist(), self.columns)
self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])
self.assertEqual(len(df), 2)
def test_multi_statements(self):
sqls = [
"CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)",
"SELECT * FROM {}".format(self.table),
"DROP TABLE test_multi_statements",
]
hook = MockHiveServer2Hook()
with mock.patch.dict('os.environ', {
'AIRFLOW_CTX_DAG_ID': 'test_dag_id',
'AIRFLOW_CTX_TASK_ID': 'HiveHook_3835',
'AIRFLOW_CTX_EXECUTION_DATE': '2015-01-01T00:00:00+00:00',
'AIRFLOW_CTX_DAG_RUN_ID': '55',
'AIRFLOW_CTX_DAG_OWNER': 'airflow',
'AIRFLOW_CTX_DAG_EMAIL': '[email protected]',
}):
# df = hook.get_pandas_df(query, schema=self.database)
results = hook.get_records(sqls, schema=self.database)
self.assertListEqual(results, [(1, 1), (2, 2)])
# self.assertEqual(len(df), 2)
# self.assertListEqual(df["hive_server_hook.a"].values.tolist(), [1, 2])
hook.get_conn.assert_called_with(self.database)
hook.mock_cursor.execute.assert_any_call(
'CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)')
hook.mock_cursor.execute.assert_any_call(
'SELECT * FROM {}'.format(self.table))
hook.mock_cursor.execute.assert_any_call(
'DROP TABLE test_multi_statements')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.dag_id=test_dag_id')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.task_id=HiveHook_3835')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.execution_date=2015-01-01T00:00:00+00:00')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.dag_run_id=55')
hook.mock_cursor.execute.assert_any_call(
'set airflow.ctx.dag_owner=airflow')
hook.mock_cursor.execute.assert_any_call(
'set [email protected]')
def test_get_results_with_hive_conf(self):
hql = ["set key",
"set airflow.ctx.dag_id",
"set airflow.ctx.dag_run_id",
"set airflow.ctx.task_id",
"set airflow.ctx.execution_date"]
dag_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']
task_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']
execution_date_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
'env_var_format']
dag_run_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
'env_var_format']
with mock.patch.dict('os.environ', {
dag_id_ctx_var_name: 'test_dag_id',
task_id_ctx_var_name: 'test_task_id',
execution_date_ctx_var_name: 'test_execution_date',
dag_run_id_ctx_var_name: 'test_dag_run_id',
}):
hook = MockHiveServer2Hook()
hook._get_results = mock.MagicMock(return_value=iter(
["header", ("value", "test"), ("test_dag_id", "test"), ("test_task_id", "test"),
("test_execution_date", "test"), ("test_dag_run_id", "test")]
))
output = '\n'.join(res_tuple[0] for res_tuple in hook.get_results(
hql=hql, hive_conf={'key': 'value'})['data'])
self.assertIn('value', output)
self.assertIn('test_dag_id', output)
self.assertIn('test_task_id', output)
self.assertIn('test_execution_date', output)
self.assertIn('test_dag_run_id', output)
class TestHiveCli(unittest.TestCase):
def setUp(self):
self.nondefault_schema = "nondefault"
os.environ["AIRFLOW__CORE__SECURITY"] = "kerberos"
def tearDown(self):
del os.environ["AIRFLOW__CORE__SECURITY"]
def test_get_proxy_user_value(self):
hook = MockHiveCliHook()
returner = mock.MagicMock()
returner.extra_dejson = {'proxy_user': 'a_user_proxy'}
hook.use_beeline = True
hook.conn = returner
# Run
result = hook._prepare_cli_cmd()
# Verify
self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2])
| apache-2.0 |
dsullivan7/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
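# Hedged footnote (added for illustration; not part of the original example): the
# same label-assignment choice is also exposed through the estimator API, which
# accepts a precomputed affinity matrix such as `graph` above.
#
#   from sklearn.cluster import SpectralClustering
#   model = SpectralClustering(n_clusters=N_REGIONS, affinity='precomputed',
#                              assign_labels='discretize', random_state=1)
#   labels = model.fit_predict(graph)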
| bsd-3-clause |
jorge2703/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are evaluation scores based only on cluster assignments rather than
distances, and are therefore not affected by the curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
SpheMakh/msutils | MSUtils/ClassESW.py | 1 | 7690 | import matplotlib
matplotlib.use('Agg')
import sys
import os
import numpy
import numpy.ma as ma
import pylab
from scipy.interpolate import interp1d
from scipy import interpolate
from MSUtils import msutils
from pyrap.tables import table
import matplotlib.cm as cm
MEERKAT_SEFD = numpy.array([
[ 856e6, 580.],
[ 900e6, 578.],
[ 950e6, 559.],
[1000e6, 540.],
[1050e6, 492.],
[1100e6, 443.],
[1150e6, 443.],
[1200e6, 443.],
[1250e6, 443.],
[1300e6, 453.],
[1350e6, 443.],
[1400e6, 424.],
[1450e6, 415.],
[1500e6, 405.],
[1550e6, 405.],
[1600e6, 405.],
[1650e6, 424.],
[1711e6, 421.]], dtype=numpy.float32)
class MSNoise(object):
"""
Estimates visibility noise statistics as a function of frequency given a measurement set (MS).
These statistics can be used to generate weights, which can be saved in the MS.
"""
def __init__(self, ms):
"""
Args:
ms (Directory, CASA Table):
CASA measurement set
"""
self.ms = ms
# First get some basic info about the Ms
self.msinfo = msutils.summary(self.ms, display=False)
self.nrows = self.msinfo['NROW']
self.ncor = self.msinfo['NCOR']
self.spw = {
"freqs" : self.msinfo['SPW']['CHAN_FREQ'],
"nchan" : self.msinfo['SPW']['NUM_CHAN'],
}
self.nspw = len(self.spw['freqs'])
def estimate_noise(self, corr=None, autocorr=False):
"""
Estimate visibility noise
"""
return
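# Hedged sketch (added; the column name and axis layout are assumptions for
# illustration only) of the idea behind a 'calc' mode: differencing adjacent
# channels cancels the slowly varying sky signal, leaving roughly sqrt(2) times
# the per-channel visibility noise.
#
#   vis = table(self.ms).getcol('DATA')              # shape (row, chan, corr)
#   diff = vis[:, 1:, :] - vis[:, :-1, :]            # adjacent-channel differences
#   noise_per_chan = diff.real.std(axis=(0, 2)) / numpy.sqrt(2)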
def estimate_weights(self, mode='specs',
stats_data=None, normalise=True,
smooth='polyn', fit_order=9,
plot_stats=True):
"""
Args:
mode (str, optional):
Mode for estimating noise statistics. These are the options:
- specs : This is a file or array of values that are proportional to the sensitivity as a function of
frequency. For example SEFD values. Column one should be the frequency, and column two should be the sensitivity.
                - calc : Calculate the noise internally. The calculation estimates the noise by taking differences between
                adjacent channels.
            stats_data (file, list, numpy.ndarray):
File or array containing information about sensitivity as a function of frequency (in Hz)
smooth (str, optional):
Generate a smooth version of the data. This version is used for further calculations. Options are:
                - polyn : Smooth with a polynomial. This is the default
- spline : Smooth with a spline
fit_order (int, optional):
                Order of the function used to smooth the data. Default is 9.
"""
#TODO(sphe): add function to estimate noise for the other mode.
# For now, fix the mode
mode = 'specs'
if mode=='specs':
if isinstance(stats_data, str):
__data = numpy.load(stats_data)
else:
__data = numpy.array(stats_data, dtype=numpy.float32)
x,y = __data[:,0], __data[:,1]
elif mode=='calc':
# x,y = self.estimate_noise()
pass
if normalise:
y /= y.max()
        # let's work in MHz
x = x*1e-6
if smooth=='polyn':
fit_parms = numpy.polyfit(x, y, fit_order)
fit_func = lambda freqs: numpy.poly1d(fit_parms)(freqs)
elif smooth=='spline':
fit_parms = interpolate.splrep(x, y, s=fit_order)
fit_func = lambda freqs: interpolate.splev(freqs, fit_parms, der=0)
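        # fit_func maps frequency in MHz (the unit x was converted to above) to the
        # smoothed noise estimate (normalised when `normalise` is True),
        # e.g. fit_func(1400.0) for a hypothetical 1400 MHz channel.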
# Get noise from the parameterised functions for each spectral window
fig, ax1 = pylab.subplots(figsize=(12,9))
ax2 = ax1.twinx()
color = iter(cm.rainbow(numpy.linspace(0,1,self.nspw)))
noise = []
weights = []
for i in range(self.nspw):
freqs = numpy.array(self.spw['freqs'][i], dtype=numpy.float32)*1e-6
_noise = fit_func(freqs)
_weights = 1.0/_noise**2
if plot_stats:
                # Use a different color to mark a new SPW
ax1.axvspan(freqs[0]/1e3, freqs[-1]/1e3, facecolor=next(color), alpha=0.25)
# Plot noise/weights
l1, = ax1.plot(x/1e3, y, 'rx')
l2, = ax1.plot(freqs/1e3, _noise, 'k-')
ax1.set_xlabel('Freq [GHz]')
ax1.set_ylabel('Norm Noise')
l3, = ax2.plot(freqs/1e3, _weights, 'g-')
ax2.set_ylabel('Weight')
noise.append(_noise)
weights.append(_weights)
# Set limits based on non-smooth noise
ylims = 1/y**2
ax2.set_ylim(ylims.min()*0.9, ylims.max()*1.1)
pylab.legend([l1,l2,l3],
['Norm. Noise', 'Polynomial fit: n={0:d}'.format(fit_order), 'Weights'], loc=1)
if isinstance(plot_stats, str):
pylab.savefig(plot_stats)
else:
pylab.savefig(self.ms + '-noise_weights.png')
pylab.clf()
return noise, weights
def write_toms(self, data,
columns=['WEIGHT', 'WEIGHT_SPECTRUM'],
stat='sum', rowchunk=None, multiply_old_weights=False):
"""
Write noise or weights into an MS.
Args:
columns (list):
columns to write weights/noise and spectral counterparts into. Default is
columns = ['WEIGHT', 'WEIGHT_SPECTRUM']
stat (str):
                Statistic to compute when combining data along the frequency axis. For example,
                use the sum along the frequency axis of WEIGHT_SPECTRUM as the weight for the WEIGHT column.
"""
        # Initialise relevant columns. It will exit with zero status if the column already exists
for i, column in enumerate(columns):
msutils.addcol(self.ms, colname=column,
valuetype='float',
clone='WEIGHT' if i==0 else 'DATA',
)
for spw in range(self.nspw):
tab = table(self.ms, readonly=False)
# Write data into MS in chunks
            # Integer division so that range() below receives an int chunk size
            rowchunk = rowchunk or self.nrows // 10
for row0 in range(0, self.nrows, rowchunk):
nr = min(rowchunk, self.nrows-row0)
# Shape for this chunk
dshape = [nr, self.spw['nchan'][spw], self.ncor]
__data = numpy.ones(dshape, dtype=numpy.float32) * data[spw][numpy.newaxis,:,numpy.newaxis]
# Consider old weights if user wants to
if multiply_old_weights:
old_weight = tab.getcol('WEIGHT', row0, nr)
print("Multiplying old weights into WEIGHT_SPECTRUM")
__data *= old_weight[:,numpy.newaxis,:]
# make a masked array to compute stats using unflagged data
flags = tab.getcol('FLAG', row0, nr)
mdata = ma.masked_array(__data, mask=flags)
print(("Populating {0:s} column (rows {1:d} to {2:d})".format(columns[1], row0, row0+nr-1)))
tab.putcol(columns[1], __data, row0, nr)
print(("Populating {0:s} column (rows {1:d} to {2:d})".format(columns[0], row0, row0+nr-1)))
if stat=="stddev":
tab.putcol(columns[0], mdata.std(axis=1).data, row0, nr)
elif stat=="sum":
tab.putcol(columns[0], mdata.sum(axis=1).data, row0, nr)
# Done
tab.close()
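# Minimal usage sketch (the MS path and plot filename are assumptions, not part of
# this module):
#
#     ms_noise = MSNoise("my_data.ms")
#     noise, weights = ms_noise.estimate_weights(mode='specs',
#                                                stats_data=MEERKAT_SEFD,
#                                                smooth='polyn', fit_order=9,
#                                                plot_stats='noise_weights.png')
#     ms_noise.write_toms(weights, columns=['WEIGHT', 'WEIGHT_SPECTRUM'], stat='sum')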
| gpl-2.0 |
nuclear-wizard/moose | modules/porous_flow/test/tests/thm_rehbinder/thm_rehbinder.py | 12 | 6179 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def rehbinder(r):
# Results from Rehbinder with parameters used in the MOOSE simulation.
# Rehbinder's manuscript contains a few typos - I've corrected them here.
# G Rehbinder "Analytic solutions of stationary coupled thermo-hydro-mechanical problems" Int J Rock Mech Min Sci & Geomech Abstr 32 (1995) 453-463
poisson = 0.2
thermal_expansion = 1E-6
young = 1E10
fluid_density = 1000
fluid_specific_heat = 1000
permeability = 1E-12
fluid_viscosity = 1E-3
thermal_conductivity = 1E6
P0 = 1E6
T0 = 1E3
Tref = T0
r0 = 0.1
r1 = 1.0
xi = r / r0
xi1 = r1 / r0
Peclet = fluid_density * fluid_specific_heat * thermal_expansion * Tref * young * permeability / fluid_viscosity / thermal_conductivity / (1 - poisson)
That0 = T0 / Tref
sigmahat0 = -P0 * (1 - poisson) / thermal_expansion / Tref / young
Tzeroth = That0 * (1 - np.log(xi) / np.log(xi1))
Tfirst_pr = 2 * sigmahat0 * That0 * xi * (np.log(xi) - np.log(xi1)) / np.log(xi1)**2
Cone = 2 * That0 * sigmahat0 * (2 + np.log(xi1)) / np.log(xi1)**2
Cone = 2 * That0 * sigmahat0 / np.log(xi1) # Corrected Eqn(87)
Done = 2 * That0 * sigmahat0 * (2 * (xi1 - 1) / np.log(xi1) - 1) / np.log(xi1)**2
Done = 2 * That0 * sigmahat0 * (- 1) / np.log(xi1)**2 # Corrected Eqn(87)
Tfirst_hm = Cone + Done * np.log(xi)
Tfirst = Tfirst_pr + Tfirst_hm
That = Tzeroth + Peclet * Tfirst
T = Tref * That
Pzeroth = -sigmahat0 * (1 - np.log(xi) / np.log(xi1))
Pfirst = 0
Phat = Pzeroth + Peclet * Pfirst
P = thermal_expansion * Tref * young * Phat / (1 - poisson)
g0 = Tzeroth + (1 - 2 * poisson) * Pzeroth / (1 - poisson)
uzeroth_pr = (That0 - sigmahat0 * (1 - 2 * poisson) / (1 - poisson)) * (0.5 * (xi**2 - 1) - 0.25 * (1 - xi**2 + 2 * xi**2 * np.log(xi)) / np.log(xi1)) / xi
uzeroth_pr_xi1 = (That0 - sigmahat0 * (1 - 2 * poisson) / (1 - poisson)) * (0.5 * (xi1**2 - 1) - 0.25 * (1 - xi1**2 + 2 * xi1**2 * np.log(xi1)) / np.log(xi1)) / xi1
# fixed outer boundary
Bzeroth = - ((1 - 2 * poisson) * sigmahat0 + uzeroth_pr_xi1 / xi1) / (1 - 2 * poisson + 1.0 / xi1)
Azeroth = - Bzeroth / xi1**2 - uzeroth_pr_xi1 / xi1
fixed_uzeroth_hm = Azeroth * xi + Bzeroth / xi
fixed_uzeroth = uzeroth_pr + fixed_uzeroth_hm
# free outer boundary
Bzeroth = (xi1**2 * sigmahat0 - xi1 * uzeroth_pr_xi1) / (1 - xi1**2)
Azeroth = (1 - 2 * poisson) * (Bzeroth + sigmahat0)
free_uzeroth_hm = Azeroth * xi + Bzeroth / xi
free_uzeroth = uzeroth_pr + free_uzeroth_hm
ufirst_pr = (1.0 / xi) * (0.5 * (xi**2 - 1) * (2 * Cone - Done) + 0.5 * Done * xi**2 * np.log(xi) + 2 * sigmahat0 * That0 / np.log(xi1)**2 * (xi**3 * np.log(xi) / 3 + (1 - xi**3) / 9 + 0.5 * np.log(xi1) * (1 - xi**2)))
ufirst_pr_xi1 = (1.0 / xi1) * (0.5 * (xi1**2 - 1) * (2 * Cone - Done) + 0.5 * Done * xi1**2 * np.log(xi1) + 2 * sigmahat0 * That0 / np.log(xi1)**2 * (xi1**3 * np.log(xi1) / 3 + (1 - xi1**3) / 9 + 0.5 * np.log(xi1) * (1 - xi1**2)))
# fixed outer boundary
Bfirst = - ufirst_pr_xi1 / xi1 / (1 - 2 * poisson + 1.0 / xi1**2)
Afirst = - Bfirst / xi1**2 - ufirst_pr_xi1 / xi1
fixed_ufirst_hm = Afirst * xi + Bfirst / xi
fixed_ufirst = ufirst_pr + fixed_ufirst_hm
# free outer boundary
Bfirst = xi1 * ufirst_pr_xi1 / (1 - xi1**2)
Afirst = (1 - 2 * poisson) * Bfirst
free_ufirst_hm = Afirst * xi + Bfirst / xi
free_ufirst = ufirst_pr + free_ufirst_hm
fixed_uhat = fixed_uzeroth + Peclet * fixed_ufirst
fixed_u = thermal_expansion * Tref * r0 * fixed_uhat * (1 + poisson) / (1 - poisson) # Corrected Eqn(16)
free_uhat = free_uzeroth + Peclet * free_ufirst
free_u = thermal_expansion * Tref * r0 * free_uhat * (1 + poisson) / (1 - poisson) # Corrected Eqn(16)
return (T, P, fixed_u, free_u)
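# Example (hypothetical radius): the analytic fields at r = 0.5 m would be obtained via
#   T, P, u_fixed, u_free = rehbinder(0.5)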
def moose(fn):
try:
f = open(fn)
data = f.readlines()[1:-1]
        data = [list(map(float, d.strip().split(","))) for d in data]
data = ([d[0] for d in data], [d[4] for d in data])
f.close()
except:
sys.stderr.write("Cannot read " + fn + ", or it contains erroneous data\n")
sys.exit(1)
return data
mooser = [0.1 * i for i in range(1, 11)]
fixedT = moose("gold/fixed_outer_T_0001.csv")
fixedP = moose("gold/fixed_outer_P_0001.csv")
fixedu = moose("gold/fixed_outer_U_0001.csv")
freeu = moose("gold/free_outer_U_0001.csv")
rpoints = np.arange(0.1, 1.0, 0.01)
expected = list(zip(*[rehbinder(r) for r in rpoints]))
plt.figure()
plt.plot(rpoints, expected[0], 'k-', linewidth = 3.0, label = 'expected')
plt.plot(fixedT[0], fixedT[1], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'upper right')
plt.xlabel("r (m)")
plt.ylabel("Temperature (K)")
plt.title("Temperature around cavity")
plt.savefig("temperature_fig.pdf")
plt.figure()
plt.plot(rpoints, [1E-6 * p for p in expected[1]], 'k-', linewidth = 3.0, label = 'expected')
plt.plot(fixedP[0], [1E-6 * p for p in fixedP[1]], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'upper right')
plt.xlabel("r (m)")
plt.ylabel("Porepressure (MPa)")
plt.title("Porepressure around cavity")
plt.savefig("porepressure_fig.pdf")
plt.figure()
plt.plot(rpoints, [1000 * u for u in expected[2]], 'k-', linewidth = 3.0, label = 'expected (fixed)')
plt.plot(fixedu[0], [1000 * u for u in fixedu[1]], 'rs', markersize = 10.0, label = 'MOOSE (fixed)')
plt.plot(rpoints, [1000 * u for u in expected[3]], 'b-', linewidth = 2.0, label = 'expected (free)')
plt.plot(freeu[0], [1000 * u for u in freeu[1]], 'g*', markersize = 13.0, label = 'MOOSE (free)')
plt.legend(loc = 'center right')
plt.xlabel("r (m)")
plt.ylabel("displacement (mm)")
plt.title("Radial displacement around cavity")
plt.savefig("displacement_fig.pdf")
sys.exit(0)
| lgpl-2.1 |