repo_name (string, 6-92 chars) | path (string, 4-191 chars) | copies (string, 322 classes) | size (string, 4-6 chars) | content (string, 821-753k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
RabadanLab/MITKats | Modules/Biophotonics/python/iMC/scripts/ipcai_to_caffe/script_create_lmdb_database.py | 6 | 1647 |
import os
import pandas as pd
import lmdb
import caffe
from regression.preprocessing import preprocess
def create_lmdb(path_to_simulation_results, lmdb_name):
df = pd.read_csv(path_to_simulation_results, header=[0, 1])
X, y = preprocess(df, snr=10.)
y = y.values * 1000
# We need to prepare the database for the size. We'll set it 10 times
# greater than what we theoretically need. There is little drawback to
# setting this too big. If you still run into problem after raising
# this, you might want to try saving fewer entries in a single
# transaction.
map_size = X.nbytes * 10
env = lmdb.open(lmdb_name, map_size=map_size)
with env.begin(write=True) as txn:
# txn is a Transaction object
for i in range(X.shape[0]):
datum = caffe.proto.caffe_pb2.Datum()
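# A Datum stores one example as a (channels, height, width) blob; here each
# sample is laid out with one channel per feature and a 1x1 spatial extent.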
datum.channels = X.shape[1]
datum.height = 1
datum.width = 1
datum.data = X[i].tobytes() # or .tostring() if numpy < 1.9
datum.label = int(y[i])
str_id = '{:08}'.format(i)
# The encode is only essential in Python 3
txn.put(str_id.encode('ascii'), datum.SerializeToString())
data_root = "/media/wirkert/data/Data/2016_02_02_IPCAI/results/intermediate"
TRAIN_IMAGES = os.path.join(data_root,
"ipcai_revision_colon_mean_scattering_train_all_spectrocam.txt")
TEST_IMAGES = os.path.join(data_root,
"ipcai_revision_colon_mean_scattering_test_all_spectrocam.txt")
create_lmdb(TRAIN_IMAGES, "ipcai_train_lmdb")
create_lmdb(TEST_IMAGES, "ipcai_test_lmdb")
| bsd-3-clause |
maciejkula/scipy | scipy/spatial/tests/test__plotutils.py | 71 | 1463 | from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
has_matplotlib = True
except Exception:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| bsd-3-clause |
sketchytechky/zipline | zipline/examples/dual_moving_average.py | 10 | 4332 | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual Moving Average Crossover algorithm.
This algorithm buys Apple (AAPL) once its short moving average crosses
above its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
from zipline.api import order_target, record, symbol, history, add_history
def initialize(context):
# Register 2 histories that track daily prices,
# one with a 100-day window and one with a 300-day window
add_history(100, '1d', 'price')
add_history(300, '1d', 'price')
context.sym = symbol('AAPL')
context.i = 0
def handle_data(context, data):
# Skip first 300 days to get full windows
context.i += 1
if context.i < 300:
return
# Compute averages
# history() has to be called with the same params
# from above and returns a pandas dataframe.
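# (The returned dataframe has one column per sid, so .mean() yields a per-sid
# Series and short_mavg[context.sym] looks up the average price for AAPL.)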
short_mavg = history(100, '1d', 'price').mean()
long_mavg = history(300, '1d', 'price').mean()
# Trading logic
if short_mavg[context.sym] > long_mavg[context.sym]:
# order_target orders as many shares as needed to
# achieve the desired number of shares.
order_target(context.sym, 100)
elif short_mavg[context.sym] < long_mavg[context.sym]:
order_target(context.sym, 0)
# Save values for later inspection
record(AAPL=data[context.sym].price,
short_mavg=short_mavg[context.sym],
long_mavg=long_mavg[context.sym])
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
import logbook
logbook.StderrHandler().push_application()
log = logbook.Logger('Algorithm')
fig = plt.figure()
ax1 = fig.add_subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = fig.add_subplot(212)
ax2.set_ylabel('Price (USD)')
# If data has been record()ed, then plot it.
# Otherwise, log the fact that no data has been recorded.
if ('AAPL' in results and 'short_mavg' in results and
'long_mavg' in results):
results['AAPL'].plot(ax=ax2)
results[['short_mavg', 'long_mavg']].plot(ax=ax2)
trans = results.ix[[t != [] for t in results.transactions]]
buys = trans.ix[[t[0]['amount'] > 0 for t in
trans.transactions]]
sells = trans.ix[
[t[0]['amount'] < 0 for t in trans.transactions]]
ax2.plot(buys.index, results.short_mavg.ix[buys.index],
'^', markersize=10, color='m')
ax2.plot(sells.index, results.short_mavg.ix[sells.index],
'v', markersize=10, color='k')
plt.legend(loc=0)
else:
msg = 'AAPL, short_mavg & long_mavg data not captured using record().'
ax2.annotate(msg, xy=(0.1, 0.5))
log.info(msg)
plt.show()
# Note: this if-block should be removed if running
# this algorithm on quantopian.com
if __name__ == '__main__':
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
# Set the simulation start and end dates.
start = datetime(2011, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2013, 1, 1, 0, 0, 0, 0, pytz.utc)
# Load price data from yahoo.
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
# Create and run the algorithm.
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data,
identifiers=['AAPL'])
results = algo.run(data)
# Plot the portfolio and asset data.
analyze(results=results)
| apache-2.0 |
Akshay0724/scikit-learn | examples/tree/plot_unveil_tree_structure.py | 13 | 4839 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight into the
relationship between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First, let's retrieve the decision path of each sample. The decision_path
# method returns the node indicator matrix: a non-zero element at position
# (i, j) indicates that sample i passes through node j.
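# (node_indicator is a sparse CSR matrix of shape (n_samples, n_nodes); the
# non-zero column indices in row i are exactly the nodes visited by sample i.)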
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's make it for the sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
# skip the leaf node itself: it carries no decision rule
if leave_id[sample_id] == node_id:
continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/quadmesh_demo.py | 14 | 1111 | #!/usr/bin/env python
"""
pcolormesh uses a QuadMesh, a faster generalization of pcolor, but
with some restrictions.
This demo illustrates a bug in quadmesh with masked data.
"""
import numpy as np
from matplotlib.pyplot import figure, show, savefig
from matplotlib import cm, colors
from numpy import ma
n = 12
x = np.linspace(-1.5,1.5,n)
y = np.linspace(-1.5,1.5,n*2)
X,Y = np.meshgrid(x,y);
Qx = np.cos(Y) - np.cos(X)
Qz = np.sin(Y) + np.sin(X)
Qx = (Qx + 1.1)
Z = np.sqrt(X**2 + Y**2)/5;
Z = (Z - Z.min()) / (Z.max() - Z.min())
# The color array can include masked values:
Zm = ma.masked_where(np.fabs(Qz) < 0.5*np.amax(Qz), Z)
fig = figure()
ax = fig.add_subplot(121)
ax.set_axis_bgcolor("#bdb76b")
ax.pcolormesh(Qx,Qz,Z, shading='gouraud')
ax.set_title('Without masked values')
ax = fig.add_subplot(122)
ax.set_axis_bgcolor("#bdb76b")
# You can control the color of the masked region:
#cmap = cm.jet
#cmap.set_bad('r', 1.0)
#ax.pcolormesh(Qx,Qz,Zm, cmap=cmap)
# Or use the default, which is transparent:
col = ax.pcolormesh(Qx,Qz,Zm,shading='gouraud')
ax.set_title('With masked values')
show()
| gpl-2.0 |
fspaolo/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 8 | 7418 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
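For example, with ``n_samples = 500`` and ``eps = 0.1`` (values also used further
down in this script), the bound evaluates to 4 ln(500) / (0.1^2 / 2 - 0.1^3 / 3),
i.e. roughly 5300 components.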
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows a drastic reduction of the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import pylab as pl
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = pl.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
pl.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
pl.loglog(n_samples_range, min_n_components, color=color)
pl.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
pl.xlabel("Number of observations to eps-embed")
pl.ylabel("Minimum number of dimensions")
pl.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
pl.show()
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = pl.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
pl.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
pl.semilogy(eps_range, min_n_components, color=color)
pl.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
pl.xlabel("Distortion eps")
pl.ylabel("Minimum number of dimensions")
pl.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
pl.show()
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
pl.figure()
pl.hexbin(dists, projected_dists, gridsize=100)
pl.xlabel("Pairwise squared distances in original space")
pl.ylabel("Pairwise squared distances in projected space")
pl.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = pl.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
pl.figure()
pl.hist(rates, bins=50, normed=True, range=(0., 2.))
pl.xlabel("Squared distances rate: projected / original")
pl.ylabel("Distribution of samples pairs")
pl.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
pl.show()
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
| bsd-3-clause |
canfar/cadcstats | condor/condor_parser.py | 1 | 13380 | import sys
import csv
import re
if "-pre" in sys.argv:
preOS = True
elif "-post" in sys.argv:
preOS = False
else:
print("missing OpenStack flag (-pre/-post) ...")
sys.exit(0)
csvOutput = True if "-csv" in sys.argv else False
jsonOutput = True if "-json" in sys.argv else False
if not (csvOutput or jsonOutput):
print("at least specify one type of output, -csv or -json ...")
sys.exit(0)
try:
log = sys.argv[sys.argv.index("-f") + 1]
try:
f = open(log, "r")
f.close()
except FileNotFoundError:
print("the input file specified by -f cannot be found ...")
sys.exit(0)
except ValueError:
print("missing input flag -f ...")
sys.exit(0)
except IndexError:
print("no file specified by -f ...")
sys.exit(0)
# list of fields that need no manipulation
implicit = [
"JobStatus",
"CommittedTime", # duration of the job including suspension
"CommittedSuspensionTime", # suspension time of the job
"CumulativeSlotTime", # core seconds
"NumJobStarts",
"RequestCpus",
"JobStartDate",
"VMName", # only available preOS
"RemoveReason",
"CompletionDate"
]
explicit = ["QDate", "Project", "Owner", "VMInstanceType", "VMInstanceName", "VMSpec.CPU", "VMSpec.RAM", "VMSpec.DISK", "RequestMemory", "RequestDisk", "MemoryUsage", "DiskUsage", "ClusterId", "ProcId"]
# the table mapping VM uuid to specifications
VM = {
# VM_ID RAM(MB) DISK(GB) SCRATCH(GB) CPU
"083093b3-ffc1-464e-a453-cefce795021b":[ 6144 , 0 , 0 , 4 ],
"0eb207f9-4575-4bd2-a430-1ed50e821d05":[ 61440 , 20 , 186 , 8 ],
"2cb70964-721d-47ff-badb-b702898b6fc2":[ 12288 , 0 , 0 , 8 ],
"2e33b8b5-d8d1-4fd8-913c-990f34a89002":[ 7680 , 20 , 31 , 2 ],
"2ff7463c-dda9-4687-8b7a-80ad3303fd41":[ 3072 , 0 , 0 , 2 ],
"327fa6c5-4ec8-432d-9607-cd7f40252320":[ 92160 , 20 , 186 , 8 ],
"39e8550a-c2cf-4094-93c0-fb70b35b6a6c":[ 1536 , 0 , 0 , 1 ],
"4998f4d2-b664-4d9d-8e0d-2071f3e44b10":[ 30720 , 20 , 83 , 4 ],
"4f61f147-353c-4a24-892b-f95a1a523ef6":[ 7680 , 20 , 30 , 1 ],
"5c86b033-a1d0-4b6a-b1c9-4a57ad84d594":[ 30720 , 20 , 380 , 8 ],
"6164f230-4859-4bf5-8f5b-fc450d8a8fb0":[ 15360 , 20 , 80 , 2 ],
"64a90d5f-71fc-4644-bc64-f8d907249e35":[ 61440 , 20 , 780 , 16 ],
"69174301-2d70-4bc1-9061-66b2eaff5d07":[ 15360 , 20 , 180 , 4 ],
"7c7fdfc0-57e6-49e9-bbde-37add33e1681":[ 61440 , 20 , 380 , 8 ],
"88e57477-b6a5-412e-85e0-69ff48ceb45c":[ 46080 , 20 , 180 , 4 ],
"91407374-25de-4c0a-bd76-c0bdaecf47eb":[ 122880 , 20 , 392 , 16 ],
"9493fdd3-3100-440d-a9a1-020d93701ed2":[ 15360 , 20 , 83 , 4 ],
"aa8ca469-e939-40ba-964d-28bfd1c61480":[ 15360 , 20 , 31 , 2 ],
"ac5155b2-87c8-42ed-9b56-edd00b3880cc":[ 122880 , 20 , 780 , 16 ],
"b64b981a-e832-47e9-903f-fb98cff0579b":[ 61440 , 20 , 392 , 16 ],
"bcb3eb5a-8485-4520-b06c-cb5a58bb482f":[ 30720 , 0 , 0 , 8 ],
"c94c95cc-6641-475b-b044-98b24a22dcaa":[ 7680 , 20 , 80 , 2 ],
"d2d56ca5-511b-4a4b-89eb-1d6f06ee58b1":[ 30720 , 20 , 186 , 8 ],
"da751037-da00-4eff-bca1-0d21dafaa347":[ 46080 , 20 , 83 , 4 ],
"de70f75f-83a0-43ce-8ac6-be3837359a0a":[ 30720 , 20 , 180 , 4 ],
"df94e28a-8983-4b4a-baa8-ffb824591c23":[ 92160 , 20 , 380 , 8 ],
# more
"13efd2a1-2fd8-48c4-822f-ce9bdc0e0004":[122880 , 20 , 780 , 16 ],
"23090fc1-bdf7-433e-9804-a7ec3d11de08":[15360 , 20 , 80 , 2 ],
"5112ed51-d263-4cc7-8b0f-7ef4782f783c":[46080 , 20 , 180 , 4 ],
"6c1ed3eb-6341-470e-92b7-5142014e7c5e":[7680 , 20 , 80 , 2 ],
"72009191-d893-4a07-871c-7f6e50b4e110":[61440 , 20 , 380 , 8 ],
"8061864c-722b-4f79-83af-91c3a835bd48":[15360 , 20 , 180 , 4 ],
"848b71a2-ae6b-4fcf-bba4-b7b0fccff5cf":[6144 , 0 , 0 , 8 ],
"8953676d-def7-4290-b239-4a14311fbb69":[30720 , 20 , 380 , 8 ],
"a55036b9-f40c-4781-a293-789647c063d7":[92160 , 20 , 380 , 8 ],
"d816ae8b-ab7d-403d-ae5f-f457b775903d":[184320 , 20 , 780 , 16 ],
"f9f6fbd7-a0af-4604-8911-041ea6cbbbe4":[768 , 0 , 0 , 1 ],
# same table but aliases
"c16.med":[122880,20,780,16],
"c2.med":[15360,20,80,2],
"p8-12gb":[12288,0,0,8],
"c4.hi":[46080,20,180,4],
"c2.low":[7680,20,80,2],
"c8.med":[61440,20,380,8],
"c4.low":[15360,20,180,4],
"p8-6gb":[6144,0,0,8],
"c8.low":[30720,20,380,8],
"c8.hi":[92160,20,380,8],
"c16.hi":[184320,20,780,16],
"p1-0.75gb-tobedeleted":[768,0,0,1],
# more, from CC, scratch or disk space is missing so I put 0 as disk space
"126e8ef0-b816-43ed-bd5f-b1d4e16fdda0":[7680 , 0 , 80 , 2 ],
"34ed4d1a-e7d5-4c74-8fdd-3db36c4bcbdb":[245760 , 0 , 1500 , 16 ],
"4100db19-f4c9-4ac8-8fed-1fd4c0a282e5":[196608 , 0 , 1000 , 12 ],
"5ea7bc52-ce75-4501-9978-fad52608809d":[122880 , 0 , 750 , 8 ],
"8e3133f4-dc5a-4fdf-9858-39e099027253":[32768 , 0 , 0 , 16 ],
"9431e310-432a-4edb-9604-d7d2d5aef0f7":[196608 , 0 , 1100 , 12 ],
"9cabf7e3-1f74-463c-ab38-d46e7f92d616":[184320 , 0 , 780 , 16 ],
"d67eccfe-042b-4f86-a2fc-92398ebc811b":[7680 , 0 , 30 , 1 ]
}
ownerProj = {
"jkavelaars":"UVic_KBOs",
"mtb55":"UVic_KBOs",
"sgwyn":"moproc",
"durand":"HST-RW",
"sfabbro":"ots",
"ptsws":"ots",
"chenc":"ots",
"fraserw":"UVic_KBOs",
"lff":"ngvs",
"pashartic":"ngvs",
"gwyn":"moproc",
"glass0":"UVic_KBOs",
"patcote":"ngvs",
"lauren":"ngvs",
"jjk":"UVic_KBOs",
"yingyu":"ngvs",
"rpike":"UVic_KBOs",
"nickball":"cadc",
"woodleka":"pandas",
"cadctest":"cadc",
"markbooth":"debris",
"MarkBooth":"debris",
"goliaths":"cadc",
"dschade":"cadc",
"pritchetsupernovae":"ots",
"ryan":"CANFAROps",
"jroediger":"ngvs",
"cshankm":"UVic_KBOs",
"fabbros":"ots",
"jouellet":"cadc",
"jonathansick":"androphot",
"shaimaaali":"shaimaaali",
"ShaimaaAli":"shaimaaali",
"johnq":"ngvs",
"helenkirk":"jcmt",
"canfradm":"CANFAR",
"clare":"ots",
"taylorm":"UVic_KBOs",
"nhill":"cadc",
"canfrops":"CANFAROps",
"laurenm":"ngvs",
"nick":"cadc",
"layth":"ngvs",
"jpveran":"aot",
"jpv":"aot",
"caread966":"ngvs",
"nvulic":"nvulic",
"kwoodley":"pandas",
"sanaz":"cfhtlens",
"cadcauthtest1":"cadc",
"dcolombo":"mwsynthesis",
"fpierfed":"HST-RW",
"echapin":"scuba2",
"jenkinsd":"cadc",
"brendam":"debris",
"russell":"cadc",
"trystynb":"ngvs",
"davids":"cadc",
"scott":"scuba2",
"streeto":"streeto",
"matthews":"debris",
"jrseti":"seti",
"bsibthorpe":"debris",
"gerryharp":"seti",
"hguy":"canarie",
"majorb":"cadc",
"samlawler":"debris",
"cadcsw":"cadc",
"canfar":"CANFAR",
"markb":"debris"
}
ownerDup = {
"gwyn":"sgwyn",
"ssgwyn":"sgwyn",
"jjk":"jkavelaars",
"fabbros":"sfabbro"
}
with open(log, "r", encoding='utf-8') as fin:
# entire output file
output = []
# each line
out = []
# used to calc mem usage
# -1 is to be handled by logstash
residentSetSize = 0
diskUsg = 0
imgSize = 0
stDate, endDate = 0, 0
# used for early logs where VMInstanceType is not defined
# where the spec of vm is written in three separate fields
vmCPUCores, vmMem, vmStorage = 0, 0, 0
# for preOS logs it is possible that RequestMemory is not available in Requirements
# i.e.: Requirements = (VMType =?= "nimbus_test" && Arch == "INTEL" && Memory >= 2048 && Cpus >= 1)
# ==> RequestMemory = 2048
# -vs-
# Requirements = (Arch == "INTEL") && (OpSys == "LINUX") && (Disk >= DiskUsage) && (((Memory * 1024) >= ImageSize)
# ==> RequestMemory unknown
# then RequestMemory is assumed to be VMMem
findReqMem = False
# for preOS we might not be able to find the project name in VMLoc field
# then proj name = owner
#findProj = False
# if it is year 2014. 2014 is different: even tho it is preOS but MemoryUsage is calculated by postOS method
yr2014 = False
# preOS, Project is traslate through ownerProj dictionary
owner = ''
# a set to dissolve timestamp conflicts
# ts = set()
content = fin.readlines()
for i, line in enumerate(content):
t = line.strip().split(" = ", 1)
# if the current line is not "*** offset ...."
if not re.match("\*\*\*", t[0]) :
if any( t[0] == x for x in implicit):
pass
# parse QDate as an integer Unix timestamp in seconds (the millisecond
# conversion and collision handling below are currently disabled)
elif t[0] == "QDate":
t[1] = int(t[1])# * 1000
#k = 1
#tmp = t[1]
#while tmp in ts:
# tmp = t[1] + k
# k += 1
#ts.add(tmp)
# epoch seconds: 2014-01-01 00:00:00 to 2015-01-01 00:00:00 UTC
if t[1] >= 1388534400 and t[1] < 1420070400:
yr2014 = True
t[1] = str(t[1])
# elif t[0] == "RemoveReason":
# t[1] = '"' + t[1] + '"'
elif t[0] == "LastRemoteHost":
t[0] = "VMInstanceName"
elif t[0] == "Owner":
owner = t[1][1:-1]
if owner in ownerDup:
owner = owner.replace(owner, ownerDup[owner])
t[1] = '"' + owner + '"'
elif t[0] == "VMCPUCores":
vmCPUCores = int(t[1].replace('"', ''))
continue
elif t[0] == "VMMem":
if re.search("G", t[1]):
vmMem = int(t[1].replace('"', '').replace("G","")) * 1024
else:
vmMem = int(t[1].replace('"', '').replace("G",""))
continue
elif t[0] == "VMStorage":
vmStorage = int(t[1].replace('"', '').replace("G",""))
continue
# from Requirements grab:
# RequestMemory, if available (preOS)
# Project:VMName (postOS)
elif t[0] == "Requirements":
if preOS:
try:
out.append('"RequestMemory":%s' % re.search("Memory\ \>\=\ (\d+)\ \&{2}\ Cpus", t[1]).group(1))
findReqMem = True
except AttributeError:
pass
else:
r = re.search("VMType\ \=\?\=\ \"(.+)\:([^\"]+)?\"", t[1])
# there are cases in history.20160304T220132 history.20150627T031409 history.20150704T171253, that we can't find Proj:VMNam in Requirements, and I will not catch this exception.
if r:
out.append( '"Project":"%s"' % r.group(1).replace("canfar-", "") )
out.append( '"VMName":"%s"' % r.group(2) )
else:
print("Can't find Proj:VMNam at line %i" % i)
continue
# # grab Project from VMLoc, preOS
# elif t[0] == "VMLoc" and preOS:
# try:
# t[1] = '"' + re.search("vospace\/([^\/]+)\/", t[1]).group(1) + '"'
# t[0] = "Project"
# findProj = True
# except AttributeError:
# continue
# from "VMInstanceType" grab the vm flavor, and translate into VMSpecs
elif t[0] == "VMInstanceType" and not preOS:
spcKey = ""
try:
spcKey = re.search('\:(.*)\"', t[1]).group(1)
if spcKey == "5c1ed3eb-6341-470e-92b7-5142014e7c5e" or spcKey == "12345678-6341-470e-92b7-5142014e7c5e":
pass
out.append('"VMSpec.RAM":%i' % VM[spcKey][0])
out.append('"VMSpec.DISK":%i' % (VM[spcKey][1] + VM[spcKey][2]))
out.append('"VMSpec.CPU":%i' % VM[spcKey][3])
except KeyError:
out.append('"VMSpec.RAM":%i' % 0)
out.append('"VMSpec.DISK":%i' % 0)
out.append('"VMSpec.CPU":%i' % 0)
t[1] = '"' + spcKey + '"'
elif t[0] == "ResidentSetSize":
residentSetSize = int(t[1])
continue
# convert to mb
elif t[0] == "DiskUsage":
diskUsg = int(t[1]) / 1000
t[1] = str(diskUsg)
# convert to mb
elif t[0] == "ImageSize":
imgSize = int(t[1])
continue
else:
continue
out.append( "\"" + t[0].strip() + "\":" + t[1] )
else:
r = re.search("ClusterId\ \=\ (\d+)\ ProcId\ \=\ (\d+)", line)
try:
out.append('"ClusterId":%s' % r.group(1))
out.append('"ProcId":%s' % r.group(2))
except AttributeError:
out.append('"ClusterId":-1')
out.append('"ProcId":-1')
if preOS:# and not yr2014:
try:
out.append('"Project":"%s"' % ownerProj[owner])
except KeyError:
out.append('"Project":""')
out.append('"VMSpec.RAM":%i' % vmMem)
out.append('"VMSpec.DISK":%i' % vmStorage)
out.append('"VMSpec.CPU":%i' % vmCPUCores)
if not yr2014:
out.append('"MemoryUsage":%.3f' % (imgSize / 1000))
if not findReqMem:
out.append('"RequestMemory":%i' % vmMem)
#if not findProj:
# out.append('"Project":%s' % owner)
else:
# for 2014, MemoryUsage = ( residentSetSize + 1023 ) / 1024
memoUsg = 0 if residentSetSize == 0 else (( residentSetSize + 1023 ) / 1024)
out.append('"MemoryUsage":%.3f' % memoUsg)
# for 2014, RequestMemory = MemoryUsage if MemoryUsage!=Null else ( ImageSize + 1023 ) / 1024
out.append('"RequestMemory":%.3f' % ((0 if imgSize == 0 else (imgSize + 1023) / 1024) if memoUsg == 0 else memoUsg))
# yr 2014 and postOS, fairly straightforward
else:
# for postOS, MemoryUsage = ( residentSetSize + 1023 ) / 1024
memoUsg = 0 if residentSetSize == 0 else (( residentSetSize + 1023 ) / 1024)
out.append('"MemoryUsage":%.3f' % memoUsg)
# for postOS, RequestMemory = MemoryUsage if MemoryUsage!=Null else ( ImageSize + 1023 ) / 1024
out.append('"RequestMemory":%.3f' % ((0 if imgSize == 0 else (imgSize + 1023) / 1024) if memoUsg == 0 else memoUsg))
# for all years, RequestDisk = DiskUsage
out.append('"RequestDisk":%.3f' % diskUsg)
# if the job finishes, compute the duration
# merge to ES JSON
# change if want to merge into other format
output.append("{" + ",".join(out) + "}\n")
# reset all the vars
rmRsn, owner = [""] * 2
out = []
#[findReqMem, findProj, yr2014, findRmRsn] = [False] * 4
[findReqMem, yr2014, findRmRsn] = [False] * 3
[residentSetSize, diskUsg, imgSize, vmCPUCores, vmMem, vmStorage, stDate, endDate] = [0] * 8
if jsonOutput:
with open(log+".json","w") as fout:
for out in output:
fout.write('%s' % out)
if csvOutput:
with open(log+".csv","w") as fout:
colName = explicit + implicit
w = csv.DictWriter(fout, fieldnames = colName)
w.writeheader()
for line in output:
w.writerow(eval(line))
| mit |
adamcandy/qgis-plugins-meshing | dev/old/contouring/bathymetry_metric.py | 3 | 26872 | #!/usr/bin/env python
##########################################################################
#
# Generation of boundary representation from arbitrary geophysical
# fields and initialisation for anisotropic, unstructured meshing.
#
# Copyright (C) 2011-2013 Dr Adam S. Candy, [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
import sys
import shutil
import math
from Scientific.IO import NetCDF
import matplotlib
matplotlib.use('Agg')
from pylab import contour
#import matplotlib
#matplotlib._cntr.Cntr
#from matplotlib import contour
#matplotlib.use('Agg')
from numpy import zeros, array, append
#contour = matplotlib.pyplot.contour
# TODO
# Calculate area in right projection
# Add region selection function
# Ensure all islands selected
# Identify Open boundaries differently
# Export command line to geo file
# If nearby, don't close with parallel
def printv(text):
if (arguments.verbose):
print text
gmsh_comment(text)
def printvv(text):
if (arguments.debug):
print text
def gmsh_comment(comment):
output.write( '// ' + comment + '\n')
def expand_boxes(region, boxes):
def error():
print 'Error in argument for -b.'
sys.exit(1)
def build_function(function, requireand, axis, comparison, number):
if (len(number) > 0):
function = '%s%s(%s %s %s)' % (function, requireand, axis, comparison, number)
requireand = ' and '
return [function, requireand]
#re.sub(pattern, repl, string,
#((latitude >= -89.0) and (latitude <=-65.0) and (longitude >= -64.0) and (longitude <= -20.0))'
if (len(boxes) > 0):
function = ''
requireor = ''
for box in boxes:
longlat = box.split(',')
if (len(longlat) != 2): error()
long = longlat[0].split(':')
lat = longlat[1].split(':')
if ((len(long) != 2) and (len(lat) != 2)): error()
function_box = ''
requireand = ''
if (len(long) == 2):
[function_box, requireand] = build_function(function_box, requireand, 'longitude', '>=', long[0])
[function_box, requireand] = build_function(function_box, requireand, 'longitude', '<=', long[1])
if (len(lat) == 2):
[function_box, requireand] = build_function(function_box, requireand, 'latitude', '>=', lat[0])
[function_box, requireand] = build_function(function_box, requireand, 'latitude', '<=', lat[1])
if (len(function_box) > 0):
function = '%s%s(%s)' % (function, requireor, function_box)
requireor = ' or '
if (len(function) > 0):
if (region != 'True'):
region = '((%s) and (%s))' % (region, function)
else:
region = function
return region
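# For example, the single box '-130.0:-85.0,-85.0:-60.0' (long_min:long_max,lat_min:lat_max)
# expands to the region expression
# '((longitude >= -130.0) and (longitude <= -85.0) and (latitude >= -85.0) and (latitude <= -60.0))'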
def usage():
print '''
-n filename | Input netCDF file
-f filename | Output Gmsh file
-p path1 (path2).. | Specify paths to include
-r function | Function specifying region of interest
-b box1 (box2).. | Boxes with regions of interest
-a minarea | Minimum area of islands
-dx dist | Distance of steps when drawing parallels and meridians (currently in degrees - need to project)
-no | Do not include open boundaries
-lat latitude | Latitude to extent open domain to
-s scenario | Select scenario (in development)
-g | Generate bathymetry .pos file (in development)
-go filename | Output bathymetry .pos file (in development)
-v | Verbose
-vv | Very verbose (debugging)
-q | Quiet
-h | Help
------------------------------------------------------------
Example usage:
Include only the main Antarctic mass (path 1), and only parts which lie below 60S
rtopo_mask_to_stereographic.py -r 'latitude <= -60.0' -p 1
Filchner-Ronne extended out to the 65S parallel
rtopo_mask_to_stereographic.py -no -b -85.0:-20.0,-89.0:-75.0 -64.0:-30.0,-89.0:-70.0 -30.0:-20.0,-89.0:-75.0 -lat '-65.0'
Antarctica, everything below the 60S parallel, coarse approximation to open boundary
rtopo_mask_to_stereographic.py -dx 2 -r 'latitude <= -60'
Small region close to the Filcher-Ronne ice shelf
rtopo_mask_to_stereographic.py -no -b -85.0:-20.0,-89.0:-75.0 -64.0:-30.0,-89.0:-70.0 -30.0:-20.0,-89.0:-75.0 -p 1 -r 'latitude <= -83'
Amundsen Sea
rtopo_mask_to_stereographic.py -no -b -130.0:-85.0,-85.0:-60.0 -lat -64.0
Small islands, single out, or group with -p
312, 314
79 - an island on 90W 68S
'''
sys.exit(0)
#def scenario(name):
# filcher_ronne = argument
argv = sys.argv[1:]
dx_default = 0.1
class arguments:
input = '/d/dataset/rtopo/RTopo105b_50S.nc'
#output = './stereographic_projection.geo'
output = './shorelines.geo'
boundaries = []
region = 'True'
box = []
minarea = 0
dx = dx_default
bathymetrydatagenerate = True
bathymetrydataoutput = './bathymetry.pos'
extendtolatitude = None
open = True
verbose = True
debug = False
call = ' '.join(argv)
while (len(argv) > 0):
argument = argv.pop(0).rstrip()
if (argument == '-h'): usage()
elif (argument == '-s'): arguments.scenario = str(argv.pop(0).rstrip()); arguments=scenario(arguments.scenario)
elif (argument == '-n'): arguments.input = argv.pop(0).rstrip()
elif (argument == '-f'): arguments.output = argv.pop(0).rstrip()
elif (argument == '-r'): arguments.region = argv.pop(0).rstrip()
elif (argument == '-dx'): arguments.dx = float(argv.pop(0).rstrip())
elif (argument == '-lat'): arguments.extendtolatitude = float(argv.pop(0).rstrip())
elif (argument == '-a'): arguments.minarea = float(argv.pop(0).rstrip())
elif (argument == '-no'): arguments.open = False
elif (argument == '-g'): arguments.bathymetrydatagenerate = True
elif (argument == '-go'): arguments.bathymetrydataoutput = argv.pop(0).rstrip()
elif (argument == '-v'): arguments.verbose = True
elif (argument == '-vv'): arguments.verbose = True; arguments.debug = True;
elif (argument == '-q'): arguments.verbose = False
elif (argument == '-p'):
while ((len(argv) > 0) and (argv[0][0] != '-')):
arguments.boundaries.append(int(argv.pop(0).rstrip()))
elif (argument == '-b'):
while ((len(argv) > 0) and ((argv[0][0] != '-') or ( (argv[0][0] == '-') and (argv[0][1].isdigit()) ))):
arguments.box.append(argv.pop(0).rstrip())
arguments.region = expand_boxes(arguments.region, arguments.box)
#source = file(arguments.input,'r')
output = file(arguments.output,'w')
gmsh_comment('Arguments:' + arguments.call)
printv('Source netCDF located at ' + arguments.input)
printv('Output to ' + arguments.output)
if (len(arguments.boundaries) > 0):
printv('Boundaries restricted to ' + str(arguments.boundaries))
if (arguments.region != 'True'):
printv('Region defined by ' + str(arguments.region))
if (arguments.dx != dx_default):
printv('Open contours closed with a line formed by points spaced %g degrees apart' % (arguments.dx))
if (arguments.extendtolatitude is not None):
printv('Extending region to meet parallel on latitude ' + str(arguments.extendtolatitude))
gmsh_comment('')
def gmsh_header():
earth_radius = 6.37101e+06
return '''
IP = newp;
IL = newl;
ILL = newll;
IS = news;
IFI = newf;
Point ( IP + 0 ) = { 0, 0, 0 };
Point ( IP + 1 ) = { 0, 0, %(earth_radius)g };
PolarSphere ( IS + 0 ) = { IP, IP + 1 };
''' % { 'earth_radius': earth_radius }
def gmsh_footer(loopstart, loopend):
output.write( '''
Field [ IFI + 0 ] = Attractor;
Field [ IFI + 0 ].NodesList = { IP + %(loopstart)i : IP + %(loopend)i };
''' % { 'loopstart':loopstart, 'loopend':loopend } )
def gmsh_remove_projection_points():
output.write( '''
Delete { Point{1}; }
Delete { Point{2}; }
''' )
def gmsh_format_point(index, loc, z):
accuracy = '.8'
format = 'Point ( IP + %%i ) = { %%%(dp)sf, %%%(dp)sf, %%%(dp)sf };\n' % { 'dp': accuracy }
output.write(format % (index, loc[0], loc[1], z))
#return "Point ( IP + %i ) = { %f, %f, %f }\n" % (index, x, y, z)
def project(location):
longitude = location[0]
latitude = location[1]
cos = math.cos
sin = math.sin
#pi = math.pi
#longitude_rad2 = longitude * ( pi / 180 )
#latitude_rad2 = latitude * ( pi / 180 )
longitude_rad = math.radians(- longitude - 90)
latitude_rad = math.radians(latitude)
# Changed sign in x formulae - need to check
x = sin( longitude_rad ) * cos( latitude_rad ) / ( 1 + sin( latitude_rad ) );
y = cos( longitude_rad ) * cos( latitude_rad ) / ( 1 + sin( latitude_rad ) );
return [ x, y ]
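# For example, project([0.0, 0.0]) (the equator at 0 deg E) maps to approximately
# (-1.0, 0.0) on the unit stereographic plane; latitude -90 is the singular point
# of this formula.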
def read_rtopo(filename):
file = NetCDF.NetCDFFile(filename, 'r')
#variableNames = fileN.variables.keys()
lon = file.variables['lon'][:]
lat = file.variables['lat'][:]
field = file.variables['amask'][:, :]
# % 2
# 0 ocean 1
# 1 ice 0
# 2 shelf 1
# 3 rock 0
field = field % 2
paths = contour(lon,lat,field,levels=[0.5]).collections[0].get_paths()
return paths
def area_enclosed(p):
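# Shoelace formula: absolute area of the polygon whose vertices are the rows of p
# (evaluated here in longitude/latitude degrees, so it is only a relative measure
# for the -a minimum-area threshold; see the TODO about calculating area in the
# right projection)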
return 0.5 * abs(sum(x0*y1 - x1*y0 for ((x0, y0), (x1, y1)) in segments(p)))
def segments(p):
# convert rows to plain tuples so the list concatenation below is not numpy element-wise addition
p = [tuple(point) for point in p]
return zip(p, p[1:] + [p[0]])
def check_point_required(region, location):
# make all definitions of the math module available to the function
env = dict(math.__dict__)
env['longitude'] = location[0]
env['latitude'] = location[1]
return eval(region, env)
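# For example, check_point_required('latitude <= -60.0', [-70.0, -65.0]) evaluates
# the region expression with longitude = -70.0 and latitude = -65.0 and returns True.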
def array_to_gmsh_points(num, index, location, minarea, region, dx, latitude_max):
gmsh_comment('Ice-Land mass number %s' % (num))
count = 0
pointnumber = len(location[:,0])
valid = [False]*pointnumber
validnumber = 0
loopstart = None
loopend = None
flag = 0
#location[:, 0] = - location[:, 0] - 90.0
for point in range(pointnumber):
longitude = location[point, 0]
latitude = location[point, 1]
if ( check_point_required(region, location[point, :]) ):
valid[point] = True
validnumber += 1
if (flag == 0):
loopstart = point
flag = 1
elif (flag == 1):
loopend = point
#print latitude, valid[point]
if (loopend is None):
printvv('Path %i skipped (no points found in region)' % ( num ))
gmsh_comment(' Skipped (no points found in region)\n')
return index
closelast=False
if (compare_points(location[loopstart,:], location[loopend,:], dx)):
# Remove duplicate line at end
# Note loopend no longer valid
valid[loopend] = False
validnumber -= 1
closelast=True
validlocation = zeros( (validnumber, 2) )
close = [False]*validnumber
count = 0
closingrequired = False
closingrequirednumber = 0
for point in range(pointnumber):
if (valid[point]):
validlocation[count,:] = location[point,:]
if ((closingrequired) and (count > 0)):
if (compare_points(validlocation[count-1,:], validlocation[count,:], dx)):
closingrequired = False
close[count] = closingrequired
count += 1
closingrequired = False
else:
if (not closingrequired):
closingrequired = True
closingrequirednumber += 1
if (closelast):
close[-1] = True
closingrequirednumber += 1
if (closingrequirednumber == 0):
closingtext = ''
elif (closingrequirednumber == 1):
closingtext = ' (required closing in %i part of the path)' % (closingrequirednumber)
else:
closingtext = ' (required closing in %i parts of the path)' % (closingrequirednumber)
area = area_enclosed(validlocation)
if (area < minarea):
printvv('Path %i skipped (area too small)' % ( num ))
gmsh_comment(' Skipped (area too small)\n')
return index
printv('Path %i points %i/%i area %g%s' % ( num, validnumber, pointnumber, area_enclosed(validlocation), closingtext ))
# if (closingrequired and closewithparallel):
# latitude_max = None
# index_start = index + 1
# for point in range(validnumber - 1):
# longitude = validlocation[point,0]
# latitude = validlocation[point,1]
# index += 1
# loc = project(longitude, latitude)
# output.write( gmsh_format_point(index, loc, 0) )
# if (latitude_max is None):
# latitude_max = latitude
# else:
# latitude_max = max(latitude_max, latitude)
# draw_parallel(index, index_start, [ validlocation[point,0], max(latitude_max, validlocation[point,1]) ], [ validlocation[0,0], max(latitude_max, validlocation[0,1]) ], points=200)
# index += 200
#
# index += 1
# output.write( gmsh_format_point(index, project(validlocation[0,0], validlocation[0,1]), 0) )
#
# else:
if (close[0]):
close[-1] = close[0]
index.start = index.point + 1
loopstartpoint = index.start
for point in range(validnumber):
#longitude = validlocation[point,0]
#latitude = validlocation[point,1]
if ((close[point]) and (point == validnumber - 1) and (not (compare_points(validlocation[point], validlocation[0], dx)))):
gmsh_comment('**** END ' + str(point) + '/' + str(validnumber-1) + str(close[point]))
index = gmsh_loop(index, loopstartpoint, False, False)
index = draw_parallel_explicit(validlocation[point], validlocation[0], index, latitude_max, dx)
index = gmsh_loop(index, loopstartpoint, True, True)
gmsh_comment('**** END end of loop ' + str(closelast) + str(point) + '/' + str(validnumber-1) + str(close[point]))
elif ((close[point]) and (point > 0) and (not (compare_points(validlocation[point], validlocation[0], dx)))):
gmsh_comment('**** NOT END ' + str(point) + '/' + str(validnumber-1) + str(close[point]))
gmsh_comment(str(validlocation[point,:]) + str(validlocation[point,:]))
index = gmsh_loop(index, loopstartpoint, False, False)
index = draw_parallel_explicit(validlocation[point - 1], validlocation[point], index, latitude_max, dx)
index = gmsh_loop(index, loopstartpoint, False, True)
gmsh_comment('**** NOT END end of loop ' + str(point) + '/' + str(validnumber-1) + str(close[point]))
else:
index.point += 1
gmsh_format_point(index.point, project(validlocation[point,:]), 0)
index.contournodes.append(index.point)
index = gmsh_loop(index, loopstartpoint, (closelast and (point == validnumber - 1)), False)
return index
#LoopStart1 = IP + 20;
#LoopEnd1 = IP + 3157;
#BSpline ( IL + 1 ) = { IP + 20 : IP + 3157 };
#Line Loop( ILL + 10 ) = { IL + 1 };
#
#LoopStart1 = IP + 3157;
#LoopEnd1 = IP + 3231;
#BSpline ( IL + 2 ) = { IP + 3157 : IP + 3231, IP + 20 };
#Line Loop( ILL + 20 ) = { IL + 2 };
def gmsh_loop(index, loopstartpoint, last, open):
if (index.point <= index.start):
return index
#pointstart = indexstart
#pointend = index.point
#loopnumber = index.loop
if (last):
closure = ', IP + %(pointstart)i' % { 'pointstart':loopstartpoint }
else:
closure = ''
if (open):
index.open.append(index.path)
type = 'open'
else:
index.contour.append(index.path)
type = 'contour'
index.pathsinloop.append(index.path)
#//Line Loop( ILL + %(loopnumber)i ) = { IL + %(loopnumber)i };
#// Identified as a %(type)s path
output.write( '''LoopStart%(loopnumber)i = IP + %(pointstart)i;
LoopEnd%(loopnumber)i = IP + %(pointend)i;
BSpline ( IL + %(loopnumber)i ) = { IP + %(pointstart)i : IP + %(pointend)i%(loopstartpoint)s };
Physical Line( IL + %(loopnumber)i ) = { IL + %(loopnumber)i };
''' % { 'pointstart':index.start, 'pointend':index.point, 'loopnumber':index.path, 'loopstartpoint':closure, 'type':type } )
if (last):
output.write( '''Line Loop( ILL + %(loop)i ) = { %(loopnumbers)s };
''' % { 'loop':index.loop , 'loopnumbers':list_to_comma_separated(index.pathsinloop, prefix = 'IL + ') } )
index.loops.append(index.loop)
index.loop += 1
index.pathsinloop = []
index.path +=1
index.start = index.point
return index
def output_boundaries(index, filename, paths=None, minarea=0, region='True', dx=0.1, latitude_max=None):
pathall = read_rtopo(filename)
printv('Paths found: ' + str(len(pathall)))
output.write( gmsh_header() )
splinenumber = 0
indexbase = 1
index.point = indexbase
if ((paths is not None) and (len(paths) > 0)):
pathids=paths
else:
pathids=range(len(pathall)+1)[1:]
for num in pathids:
xy=pathall[num-1].vertices
#print xy
index = array_to_gmsh_points(num, index, xy, minarea, region, dx, latitude_max)
#for i in range(-85, 0, 5):
# indexend += 1
# output.write( gmsh_format_point(indexend, project(0, i), 0) )
#for i in range(-85, 0, 5):
# indexend += 1
# output.write( gmsh_format_point(indexend, project(45, i), 0) )
gmsh_remove_projection_points()
return index
def define_point(name, location):
# location [long, lat]
output.write('''
//Point %(name)s is located at, %(longitude).2f deg, %(latitude).2f deg.
Point_%(name)s_longitude_rad = (%(longitude)f + (00/60))*(Pi/180);
Point_%(name)s_latitude_rad = (%(latitude)f + (00/60))*(Pi/180);
Point_%(name)s_stereographic_y = Cos(Point_%(name)s_longitude_rad)*Cos(Point_%(name)s_latitude_rad) / ( 1 + Sin(Point_%(name)s_latitude_rad) );
Point_%(name)s_stereographic_x = Cos(Point_%(name)s_latitude_rad) *Sin(Point_%(name)s_longitude_rad) / ( 1 + Sin(Point_%(name)s_latitude_rad) );
''' % { 'name':name, 'longitude':location[0], 'latitude':location[1] } )
def draw_parallel(startn, endn, start, end, points=200):
startp = project(start)
endp = project(end)
output.write('''
pointsOnParallel = %(points)i;
parallelSectionStartingX = %(start_x)g;
parallelSectionStartingY = %(start_y)g;
firstPointOnParallel = IP + %(start_n)i;
parallelSectionEndingX = %(end_x)g;
parallelSectionEndingY = %(end_y)g;
lastPointOnParallel = IP + %(end_n)i;
newParallelID = IL + 10100;
Call DrawParallel;
''' % { 'start_x':startp[0], 'start_y':startp[1], 'end_x':endp[0], 'end_y':endp[1], 'start_n':startn, 'end_n':endn, 'points':points })
def compare_points(a, b, dx):
tolerance = dx * 0.6
if ( not (abs(a[1] - b[1]) < tolerance) ):
#gmsh_comment('lat differ')
return False
elif (abs(a[0] - b[0]) < tolerance):
#gmsh_comment('long same')
return True
elif ((abs(abs(a[0]) - 180) < tolerance) and (abs(abs(b[0]) - 180) < tolerance)):
#gmsh_comment('long +/-180')
return True
else:
#gmsh_comment('not same %g %g' % (abs(abs(a[0]) - 180), abs(abs(b[0]) - 180) ) )
return False
def draw_parallel_explicit(start, end, index, latitude_max, dx):
#print start, end, index.point
# Note start is actual start - 1
if (latitude_max is None):
latitude_max = max(start[1], end[1])
else:
latitude_max = max(latitude_max, start[1], end[1])
current = start
tolerance = dx * 0.6
gmsh_comment( 'Closing path with parallels and merdians, from (%.8f, %.8f) to (%.8f, %.8f)' % ( start[0], start[1], end[0], end[1] ) )
if (compare_points(current, end, dx)):
gmsh_comment('Points already close enough, no need to draw parallels and meridians after all')
return index
gmsh_comment('Drawing meridian to max latitude index %s at %f.2, %f.2 (to match %f.2)' % (index.point, current[0], current[1], latitude_max))
while (current[1] != latitude_max):
if (current[1] < latitude_max):
current[1] = current[1] + dx
else:
current[1] = current[1] - dx
if (abs(current[1] - latitude_max) < tolerance): current[1] = latitude_max
if (compare_points(current, end, dx)): return index
index.point += 1
printvv('Drawing meridian to max latitude index %s at %f.2, %f.2 (to match %f.2)' % (index.point, current[0], current[1], latitude_max))
loc = project(current)
gmsh_format_point(index.point, loc, 0.0)
gmsh_comment('Drawing parallel index %s at %f.2 (to match %f.2), %f.2' % (index.point, current[0], end[0], current[1]))
while (current[0] != end[0]):
if (current[0] < end[0]):
current[0] = current[0] + dx
else:
current[0] = current[0] - dx
if (abs(current[0] - end[0]) < tolerance): current[0] = end[0]
if (compare_points(current, end, dx)): return index
index.point += 1
printvv('Drawing parallel index %s at %f.2 (to match %f.2), %f.2' % (index.point, current[0], end[0], current[1]))
loc = project(current)
gmsh_format_point(index.point, loc, 0.0)
gmsh_comment('Drawing meridian to end index %s at %f.2, %f.2 (to match %f.2)' % (index.point, current[0], current[1], end[1]))
while (current[1] != end[1]):
if (current[1] < end[1]):
current[1] = current[1] + dx
else:
current[1] = current[1] - dx
if (abs(current[1] - end[1]) < tolerance): current[1] = end[1]
if (compare_points(current, end, dx)): return index
index.point += 1
printvv('Drawing meridian to end index %s at %f.2, %f.2 (to match %f.2)' % (index.point, current[0], current[1], end[1]))
loc = project(current)
gmsh_format_point(index.point, loc, 0.0)
gmsh_comment( 'Closed path with parallels and merdians, from (%.8f, %.8f) to (%.8f, %.8f)' % ( start[0], start[1], end[0], end[1] ) )
return index
def list_to_comma_separated(numbers, prefix='', add=0):
requirecomma = False
string = ''
for number in numbers:
if (requirecomma):
string += ', '
else:
requirecomma = True
string += prefix
string += str(number + add)
return string
def list_to_space_separated(numbers, prefix='', add=0):
requirespace = False
string = ''
for number in numbers:
if (requirespace):
string += ' '
else:
requirespace = True
string += prefix
string += str(number + add)
return string
def output_open_boundaries(index, boundary, dx):
parallel = -50.0
index.start = index.point + 1
loopstartpoint = index.start
index = draw_parallel_explicit([ -1.0, parallel], [ 179.0, parallel], index, None, dx)
index = draw_parallel_explicit([-179.0, parallel], [ 1.0, parallel], index, None, dx)
index = gmsh_loop(index, loopstartpoint, True, True)
return index
def output_surfaces(index, boundary):
printv('Open boundaries (id %i): %s' % (boundary.open, list_to_space_separated(index.open, add=1)))
printv('Closed boundaries (id %i): %s' % (boundary.contour, list_to_space_separated(index.contour, add=1)))
boundary_list = list_to_comma_separated(index.contour + index.open)
#//Line Loop( ILL + %(loopnumber)i ) = { %(boundary_list)s };
#//Plane Surface( %(surface)i ) = { ILL + %(loopnumber)i };
output.write('''
Plane Surface( %(surface)i ) = { %(boundary_list)s };
''' % { 'loopnumber':index.path, 'surface':boundary.surface + 1, 'boundary_list':list_to_comma_separated(index.loops, prefix = 'ILL + ') } )
def acc_array():
acc = array([[ 1.0, -53.0 ],
[ 10.0, -53.0 ],
[ 20.0, -52.0 ],
[ 30.0, -56.0 ],
[ 40.0, -60.0 ],
[ 50.0, -63.0 ],
[ 60.0, -64.0 ],
[ 70.0, -65.0 ],
[ 80.0, -67.0 ],
[ 90.0, -60.0 ],
[ 100.0, -58.0 ],
[ 110.0, -62.0 ],
[ 120.0, -63.0 ],
[ 130.0, -65.0 ],
[ 140.0, -65.0 ],
[ 150.0, -64.0 ],
[ 160.0, -61.0 ],
[ 170.0, -64.0 ],
[ 179.0, -65.0 ],
[-179.0, -65.0 ],
[-170.0, -64.0 ],
[-160.0, -62.0 ],
[-150.0, -66.0 ],
[-140.0, -58.0 ],
[-130.0, -60.0 ],
[-120.0, -65.0 ],
[-110.0, -66.0 ],
[-100.0, -70.0 ],
[ -90.0, -70.0 ],
[ -80.0, -77.0 ],
[ -70.0, -72.0 ],
[ -60.0, -60.0 ],
[ -50.0, -57.0 ],
[ -40.0, -51.0 ],
[ -30.0, -50.0 ],
[ -20.0, -60.0 ],
[ -10.0, -56.0 ],
[ -1.0, -53.0 ]])
return acc
def draw_acc_old(index, boundary, dx):
acc = acc_array()
gmsh_comment('ACC')
index.start = index.point + 1
loopstartpoint = index.start
for i in range(len(acc[:,0])):
index.point += 1
location = project(acc[i,:])
gmsh_format_point(index.point, location, 0.0)
for i in range(len(acc[:,0])):
a = index.start + i
b = a + 1
if (a == index.point):
b = index.start
output.write('Line(%i) = {%i,%i};\n' % (i + 100000, a, b ))
output.write('Line Loop(999999) = { %i : %i};\n' % ( index.start, index.point ))
return index
def draw_acc(index, boundary, dx):
acc = acc_array()
acc1 = acc[0:18,:]
acc2 = acc[19:,:]
print acc1
print acc2
gmsh_comment('ACC')
index.start = index.point + 1
loopstartpoint = index.start
for i in range(len(acc1[:,0])):
index.point += 1
location = project(acc1[i,:])
gmsh_format_point(index.point, location, 0.0)
index = gmsh_loop(index, loopstartpoint, False, True)
#index.start = index.point + 1
#loopstartpoint = index.start
for i in range(len(acc2[:,0])):
index.point += 1
location = project(acc2[i,:])
gmsh_format_point(index.point, location, 0.0)
index = gmsh_loop(index, loopstartpoint, True, True)
return index
def output_fields(index,boundary):
if (index.contour is not None):
output.write('''
Printf("Assigning characteristic mesh sizes...");
Field[ IFI + 1] = Attractor;
Field[ IFI + 1].EdgesList = { 999999, %(boundary_list)s };
Field [ IFI + 1 ].NNodesByEdge = 5e4;
Field[ IFI + 2] = Threshold;
Field[ IFI + 2].DistMax = 2e6;
Field[ IFI + 2].DistMin = 3e4;
Field[ IFI + 2].IField = IFI + 1;
Field[ IFI + 2].LcMin = 5e4;
Field[ IFI + 2].LcMax = 2e5;
Background Field = IFI + 2;
// Dont extent the elements sizes from the boundary inside the domain
Mesh.CharacteristicLengthExtendFromBoundary = 0;
//Set some options for better png output
General.Color.Background = {255,255,255};
General.Color.BackgroundGradient = {255,255,255};
General.Color.Foreground = Black;
Mesh.Color.Lines = {0,0,0};
General.Trackball = 0 ;
General.RotationX = 180;
General.RotationY = 0;
General.RotationZ = 270;
''' % { 'boundary_list':list_to_comma_separated(index.contour, prefix = 'IL + ') } )
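# Minimal sketch of the module-level state containers the code below expects
# (attribute names inferred from their usage in the functions above; the initial
# values here are assumptions, not taken from the original source):
class index:
    point = 0
    start = 0
    path = 1
    loop = 1
    loops = []
    pathsinloop = []
    contournodes = []
    open = []
    contour = []
    skipped = []
class boundary:
    contour = 1
    open = 2
    surface = 9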
if (arguments.bathymetrydatagenerate):
printv('Generating bathymetry data file '+arguments.bathymetrydataoutput)
sys.exit()
index = output_boundaries(index, filename=arguments.input, paths=arguments.boundaries, minarea=arguments.minarea, region=arguments.region, dx=arguments.dx, latitude_max=arguments.extendtolatitude)
if (arguments.open): index = output_open_boundaries(index, boundary, arguments.dx)
output_surfaces(index, boundary)
index = draw_acc(index, boundary, arguments.dx)
output_fields(index,boundary)
if (len(index.skipped) > 0):
printv('Skipped (because no point on the boundary appeared in the required region, or area enclosed by the boundary was too small):\n'+' '.join(index.skipped))
output.close()
| lgpl-2.1 |
d-gold/data_utils3 | bin/join.py | 1 | 3858 | #!/usr/bin/env python
# encoding: utf-8
# pylint: disable=W0141, C0103
import textwrap
import pandas as p
from fn import F
from lib.parse_args import add_common_arguments
from lib.d3_io import load_datasets, save_datasets
def get_argument_settings():
"""@todo: Docstring for get_argument_settings.
:returns: @todo
"""
description = textwrap.dedent("""
Joins two delimited files by common keys.
""")
epilog = textwrap.dedent("""
Examples:
""")
m_overrides = []
parser = add_common_arguments(m_overrides, description=description,
epilog=epilog)
parser.add_argument('--on',
dest='on',
default=None,
required=False,
nargs='+',
help="Common fields to join on")
parser.add_argument('--left-on',
dest='left_on',
default=None,
required=False,
nargs='+',
help="Join fields for left side (only two tables allowed)")
parser.add_argument('--right-on',
dest='right_on',
default=None,
required=False,
nargs='+',
help="Join fields for right side (only two tables allowed)")
parser.add_argument('--how',
dest='how',
default='inner',
required=False,
choices=('left', 'right', 'outer', 'inner'),
help="How to join")
return parser
def main():
"""@todo: Docstring for main.
:returns: @todo
"""
def load_csv(delimiter, filename):
"""@todo: Docstring for load_csv.
:filename: @todo
:returns: @todo
"""
return (type(filename) is file and
p.read_csv(filename, sep=delimiter)) or \
(filename.endswith('.gz') and
p.read_csv(filename, sep=delimiter, compression='gz')) or \
(filename.endswith('.bz2') and
p.read_csv(filename, sep=delimiter, compression='bz2')) or \
p.read_csv(filename, sep=delimiter)
def merge_data(on, dataset_1, dataset_2, how):
"""@todo: Docstring for merge_data.
:dataset_1: @todo
:dataset_2: @todo
:returns: @todo
"""
return p.merge(dataset_1, dataset_2, on=on, how=how)
    def normal_join(input_files, on, how):
data = map(F(load_datasets, delimiter), input_files)
transformed = reduce(lambda x, y: merge_data(on, x, y, how), data)
return transformed
def mismatched_join(input_files, left_on, right_on, how):
data = map(F(load_datasets, delimiter), input_files[0:2])
transformed = p.merge(data[0], data[1],
left_on = left_on, right_on = right_on,
how=how)
return transformed
parser = get_argument_settings()
args = parser.parse_args()
input_files = args.input_files
if len(input_files) < 2:
print "At least two files are required to join"
return
on = args.on
left_on = args.left_on
right_on = args.right_on
delimiter = args.delimiter
how = args.how
if on:
transformed = normal_join(input_files, on, how)
else:
transformed = mismatched_join(input_files, left_on, right_on, how)
# data = map(F(load_datasets, delimiter), input_files)
# transformed = reduce(lambda x, y: merge_data(on, x, y), data)
result = save_datasets([transformed], args.output_file,
delimiter=delimiter)
return result
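# Illustrative invocations (the input/output and delimiter handling is assumed
# to come from lib.parse_args.add_common_arguments, so those flags may differ):
#   join.py a.csv b.csv --on id --how left
#   join.py a.csv b.csv --left-on user_id --right-on id --how inner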
if __name__ == '__main__':
main()
| mit |
bhermansyah/DRR-datacenter | geodb/radarchart.py | 1 | 2091 | # # from matplotlib.ticker import FormatStrFormatter
# import numpy as np
# import pylab as pl
# import matplotlib.pyplot as plt
import StringIO
import base64
from graphos.renderers.base import BaseChart
import pygal
from pygal.style import Style
# from matplotlib.path import Path
# from matplotlib.spines import Spine
# from matplotlib.projections.polar import PolarAxes
# from matplotlib.projections import register_projection
class BaseMatplotlibChart(BaseChart):
def get_template(self):
return "radar.html"
def get_serieses(self):
data_only = self.get_data()[1:]
return data_only
def get_xlabels(self):
response = []
for i in self.get_serieses():
response.append(i[0])
return response
def get_opt_data(self, index):
response = []
for i in self.get_data()[1:]:
response.append(i[index])
return response
class RadarChart(BaseMatplotlibChart):
def get_image(self):
custom_style = Style(
background='#ffffff',
plot_background='#ffffff',
# foreground='#53E89B',
# foreground_strong='#53A0E8',
# foreground_subtle='#630C0D',
# opacity='.6',
# opacity_hover='.9',
# transition='400ms ease-in',
colors=('rgb(255, 0, 0)', 'rgb(18, 5, 240)', 'rgb(255, 153, 0)', 'rgb(16, 150, 24)'))
        radar_chart = pygal.Radar(legend_at_bottom=True, width=450, height=450, style=custom_style, show_legend=False)
radar_chart.title = self.get_options()['title']
radar_chart.x_labels = self.get_xlabels()
for i in self.get_options()['col-included']:
radar_chart.add(i['name'], self.get_opt_data(i['col-no']), fill=i['fill'], show_dots=False, stroke_style={'width': 3, 'linecap': 'round', 'linejoin': 'round'})
# radar_chart.add('Dead', self.get_opt_data(2), fill=True, show_dots=False)
# radar_chart.add('Violent', self.get_opt_data(3), fill=True, show_dots=False)
# radar_chart.add('Injured', self.get_opt_data(4), fill=False, show_dots=False)
return radar_chart.render_data_uri(human_readable=True)
# return radar_chart
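# Illustrative usage sketch (the graphos data-source class and constructor
# signature are assumptions; 'title' and 'col-included' are the option keys
# actually read by get_image above):
#   chart = RadarChart(SimpleDataSource(data),
#                      options={'title': 'Incidents by month',
#                               'col-included': [{'name': 'Dead', 'col-no': 2, 'fill': True}]})
#   data_uri = chart.get_image()  # base64-encoded SVG data URI for radar.html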
| gpl-3.0 |
sinhrks/seaborn | seaborn/axisgrid.py | 20 | 66716 | from __future__ import division
from itertools import product
from distutils.version import LooseVersion
import warnings
from textwrap import dedent
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from six import string_types
from . import utils
from .palettes import color_palette
class Grid(object):
"""Base class for grids of subplots."""
_margin_titles = False
_legend_out = True
def set(self, **kwargs):
"""Set attributes on each subplot Axes."""
for ax in self.axes.flat:
ax.set(**kwargs)
return self
def savefig(self, *args, **kwargs):
"""Save the figure."""
kwargs = kwargs.copy()
kwargs.setdefault("bbox_inches", "tight")
self.fig.savefig(*args, **kwargs)
def add_legend(self, legend_data=None, title=None, label_order=None,
**kwargs):
"""Draw a legend, maybe placing it outside axes and resizing the figure.
Parameters
----------
legend_data : dict, optional
Dictionary mapping label names to matplotlib artist handles. The
default reads from ``self._legend_data``.
title : string, optional
Title for the legend. The default reads from ``self._hue_var``.
label_order : list of labels, optional
The order that the legend entries should appear in. The default
reads from ``self.hue_names`` or sorts the keys in ``legend_data``.
kwargs : key, value pairings
Other keyword arguments are passed to the underlying legend methods
on the Figure or Axes object.
Returns
-------
self : Grid instance
Returns self for easy chaining.
"""
# Find the data for the legend
legend_data = self._legend_data if legend_data is None else legend_data
if label_order is None:
if self.hue_names is None:
label_order = np.sort(list(legend_data.keys()))
else:
label_order = list(map(str, self.hue_names))
blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
handles = [legend_data.get(l, blank_handle) for l in label_order]
title = self._hue_var if title is None else title
try:
title_size = mpl.rcParams["axes.labelsize"] * .85
except TypeError: # labelsize is something like "large"
title_size = mpl.rcParams["axes.labelsize"]
# Set default legend kwargs
kwargs.setdefault("scatterpoints", 1)
if self._legend_out:
# Draw a full-figure legend outside the grid
figlegend = self.fig.legend(handles, label_order, "center right",
**kwargs)
self._legend = figlegend
figlegend.set_title(title)
# Set the title size a roundabout way to maintain
            # compatibility with matplotlib 1.1
prop = mpl.font_manager.FontProperties(size=title_size)
figlegend._legend_title_box._text.set_font_properties(prop)
# Draw the plot to set the bounding boxes correctly
plt.draw()
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self.fig.dpi
figure_width = self.fig.get_figwidth()
self.fig.set_figwidth(figure_width + legend_width)
# Draw the plot again to get the new transformations
plt.draw()
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self.fig.dpi
space_needed = legend_width / (figure_width + legend_width)
margin = .04 if self._margin_titles else .01
self._space_needed = margin + space_needed
right = 1 - self._space_needed
# Place the subplot axes to give space for the legend
self.fig.subplots_adjust(right=right)
else:
# Draw a legend in the first axis
ax = self.axes.flat[0]
leg = ax.legend(handles, label_order, loc="best", **kwargs)
leg.set_title(title)
# Set the title size a roundabout way to maintain
            # compatibility with matplotlib 1.1
prop = mpl.font_manager.FontProperties(size=title_size)
leg._legend_title_box._text.set_font_properties(prop)
return self
def _clean_axis(self, ax):
"""Turn off axis labels and legend."""
ax.set_xlabel("")
ax.set_ylabel("")
ax.legend_ = None
return self
def _update_legend_data(self, ax):
"""Extract the legend data from an axes object and save it."""
handles, labels = ax.get_legend_handles_labels()
data = {l: h for h, l in zip(handles, labels)}
self._legend_data.update(data)
def _get_palette(self, data, hue, hue_order, palette):
"""Get a list of colors for the hue variable."""
if hue is None:
palette = color_palette(n_colors=1)
else:
hue_names = utils.categorical_order(data[hue], hue_order)
n_colors = len(hue_names)
# By default use either the current color palette or HUSL
if palette is None:
current_palette = mpl.rcParams["axes.color_cycle"]
if n_colors > len(current_palette):
colors = color_palette("husl", n_colors)
else:
colors = color_palette(n_colors=n_colors)
# Allow for palette to map from hue variable names
elif isinstance(palette, dict):
color_names = [palette[h] for h in hue_names]
colors = color_palette(color_names, n_colors)
# Otherwise act as if we just got a list of colors
else:
colors = color_palette(palette, n_colors)
palette = color_palette(colors, n_colors)
return palette
_facet_docs = dict(
data=dedent("""\
data : DataFrame
Tidy ("long-form") dataframe where each column is a variable and each
row is an observation.\
"""),
col_wrap=dedent("""\
col_wrap : int, optional
"Wrap" the column variable at this width, so that the column facets
span multiple rows. Incompatible with a ``row`` facet.\
"""),
share_xy=dedent("""\
share_{x,y} : bool, optional
If true, the facets will share y axes across columns and/or x axes
across rows.\
"""),
size=dedent("""\
size : scalar, optional
Height (in inches) of each facet. See also: ``aspect``.\
"""),
aspect=dedent("""\
aspect : scalar, optional
Aspect ratio of each facet, so that ``aspect * size`` gives the width
of each facet in inches.\
"""),
palette=dedent("""\
palette : seaborn color palette or dict, optional
Colors to use for the different levels of the ``hue`` variable. Should
be something that can be interpreted by :func:`color_palette`, or a
dictionary mapping hue levels to matplotlib colors.\
"""),
legend_out=dedent("""\
legend_out : bool, optional
If ``True``, the figure size will be extended, and the legend will be
drawn outside the plot on the center right.\
"""),
margin_titles=dedent("""\
margin_titles : bool, optional
If ``True``, the titles for the row variable are drawn to the right of
the last column. This option is experimental and may not work in all
cases.\
"""),
)
class FacetGrid(Grid):
"""Subplot grid for plotting conditional relationships."""
def __init__(self, data, row=None, col=None, hue=None, col_wrap=None,
sharex=True, sharey=True, size=3, aspect=1, palette=None,
row_order=None, col_order=None, hue_order=None, hue_kws=None,
dropna=True, legend_out=True, despine=True,
margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
gridspec_kws=None):
MPL_GRIDSPEC_VERSION = LooseVersion('1.4')
OLD_MPL = LooseVersion(mpl.__version__) < MPL_GRIDSPEC_VERSION
# Determine the hue facet layer information
hue_var = hue
if hue is None:
hue_names = None
else:
hue_names = utils.categorical_order(data[hue], hue_order)
colors = self._get_palette(data, hue, hue_order, palette)
# Set up the lists of names for the row and column facet variables
if row is None:
row_names = []
else:
row_names = utils.categorical_order(data[row], row_order)
if col is None:
col_names = []
else:
col_names = utils.categorical_order(data[col], col_order)
# Additional dict of kwarg -> list of values for mapping the hue var
hue_kws = hue_kws if hue_kws is not None else {}
# Make a boolean mask that is True anywhere there is an NA
# value in one of the faceting variables, but only if dropna is True
none_na = np.zeros(len(data), np.bool)
if dropna:
row_na = none_na if row is None else data[row].isnull()
col_na = none_na if col is None else data[col].isnull()
hue_na = none_na if hue is None else data[hue].isnull()
not_na = ~(row_na | col_na | hue_na)
else:
not_na = ~none_na
# Compute the grid shape
ncol = 1 if col is None else len(col_names)
nrow = 1 if row is None else len(row_names)
self._n_facets = ncol * nrow
self._col_wrap = col_wrap
if col_wrap is not None:
if row is not None:
err = "Cannot use `row` and `col_wrap` together."
raise ValueError(err)
ncol = col_wrap
nrow = int(np.ceil(len(data[col].unique()) / col_wrap))
self._ncol = ncol
self._nrow = nrow
# Calculate the base figure size
# This can get stretched later by a legend
figsize = (ncol * size * aspect, nrow * size)
# Validate some inputs
if col_wrap is not None:
margin_titles = False
# Build the subplot keyword dictionary
subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
if xlim is not None:
subplot_kws["xlim"] = xlim
if ylim is not None:
subplot_kws["ylim"] = ylim
# Initialize the subplot grid
if col_wrap is None:
kwargs = dict(figsize=figsize, squeeze=False,
sharex=sharex, sharey=sharey,
subplot_kw=subplot_kws,
gridspec_kw=gridspec_kws)
if OLD_MPL:
_ = kwargs.pop('gridspec_kw', None)
if gridspec_kws:
msg = "gridspec module only available in mpl >= {}"
warnings.warn(msg.format(MPL_GRIDSPEC_VERSION))
fig, axes = plt.subplots(nrow, ncol, **kwargs)
self.axes = axes
else:
# If wrapping the col variable we need to make the grid ourselves
if gridspec_kws:
warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
n_axes = len(col_names)
fig = plt.figure(figsize=figsize)
axes = np.empty(n_axes, object)
axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
if sharex:
subplot_kws["sharex"] = axes[0]
if sharey:
subplot_kws["sharey"] = axes[0]
for i in range(1, n_axes):
axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
self.axes = axes
# Now we turn off labels on the inner axes
if sharex:
for ax in self._not_bottom_axes:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
if sharey:
for ax in self._not_left_axes:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
# Set up the class attributes
# ---------------------------
# First the public API
self.data = data
self.fig = fig
self.axes = axes
self.row_names = row_names
self.col_names = col_names
self.hue_names = hue_names
self.hue_kws = hue_kws
# Next the private variables
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._margin_titles = margin_titles
self._col_wrap = col_wrap
self._hue_var = hue_var
self._colors = colors
self._legend_out = legend_out
self._legend = None
self._legend_data = {}
self._x_var = None
self._y_var = None
self._dropna = dropna
self._not_na = not_na
# Make the axes look good
fig.tight_layout()
if despine:
self.despine()
__init__.__doc__ = dedent("""\
Initialize the matplotlib figure and FacetGrid object.
The :class:`FacetGrid` is an object that links a Pandas DataFrame to
a matplotlib figure with a particular structure.
In particular, :class:`FacetGrid` is used to draw plots with multiple
Axes where each Axes shows the same relationship conditioned on
different levels of some variable. It's possible to condition on up to
three variables by assigning variables to the rows and columns of the
grid and using different colors for the plot elements.
The general approach to plotting here is called "small multiples",
where the same kind of plot is repeated multiple times, and the
specific use of small multiples to display the same relationship
    conditioned on one or more other variables is often called a "trellis
plot".
The basic workflow is to initialize the :class:`FacetGrid` object with
the dataset and the variables that are used to structure the grid. Then
one or more plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map` or :meth:`FacetGrid.map_dataframe`. Finally, the
plot can be tweaked with other methods to do things like change the
axis labels, use different ticks, or add a legend. See the detailed
code examples below for more information.
Parameters
----------
{data}
row, col, hue : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``*_order`` parameters to
control the order of levels of this variable.
{col_wrap}
{share_xy}
{size}
{aspect}
{palette}
{{row,col,hue}}_order : lists, optional
Order for the levels of the faceting variables. By default, this
will be the order that the levels appear in ``data`` or, if the
variables are pandas categoricals, the category order.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
{legend_out}
despine : boolean, optional
Remove the top and right spines from the plots.
{margin_titles}
{{x, y}}lim: tuples, optional
Limits for each of the axes on each facet (only relevant when
        share{{x, y}} is True).
subplot_kws : dict, optional
Dictionary of keyword arguments passed to matplotlib subplot(s)
methods.
gridspec_kws : dict, optional
Dictionary of keyword arguments passed to matplotlib's ``gridspec``
module (via ``plt.subplots``). Requires matplotlib >= 1.4 and is
ignored if ``col_wrap`` is not ``None``.
See Also
--------
PairGrid : Subplot grid for plotting pairwise relationships.
lmplot : Combine a regression plot and a :class:`FacetGrid`.
factorplot : Combine a categorical plot and a :class:`FacetGrid`.
Examples
--------
Initialize a 2x2 grid of facets using the tips dataset:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(style="ticks", color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.FacetGrid(tips, col="time", row="smoker")
Draw a univariate plot on each facet:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> g = sns.FacetGrid(tips, col="time", row="smoker")
>>> g = g.map(plt.hist, "total_bill")
(Note that it's not necessary to re-catch the returned variable; it's
the same object, but doing so in the examples makes dealing with the
doctests somewhat less annoying).
Pass additional keyword arguments to the mapped function:
.. plot::
:context: close-figs
>>> import numpy as np
>>> bins = np.arange(0, 65, 5)
>>> g = sns.FacetGrid(tips, col="time", row="smoker")
>>> g = g.map(plt.hist, "total_bill", bins=bins, color="r")
Plot a bivariate function on each facet:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", row="smoker")
>>> g = g.map(plt.scatter, "total_bill", "tip", edgecolor="w")
Assign one of the variables to the color of the plot elements:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", hue="smoker")
>>> g = (g.map(plt.scatter, "total_bill", "tip", edgecolor="w")
... .add_legend())
Change the size and aspect ratio of each facet:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="day", size=4, aspect=.5)
>>> g = g.map(sns.boxplot, "time", "total_bill")
Specify the order for plot elements:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="smoker", col_order=["Yes", "No"])
>>> g = g.map(plt.hist, "total_bill", bins=bins, color="m")
Use a different color palette:
.. plot::
:context: close-figs
>>> kws = dict(s=50, linewidth=.5, edgecolor="w")
>>> g = sns.FacetGrid(tips, col="sex", hue="time", palette="Set1",
... hue_order=["Dinner", "Lunch"])
>>> g = (g.map(plt.scatter, "total_bill", "tip", **kws)
... .add_legend())
Use a dictionary mapping hue levels to colors:
.. plot::
:context: close-figs
>>> pal = dict(Lunch="seagreen", Dinner="gray")
>>> g = sns.FacetGrid(tips, col="sex", hue="time", palette=pal,
... hue_order=["Dinner", "Lunch"])
>>> g = (g.map(plt.scatter, "total_bill", "tip", **kws)
... .add_legend())
Additionally use a different marker for the hue levels:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="sex", hue="time", palette=pal,
... hue_order=["Dinner", "Lunch"],
... hue_kws=dict(marker=["^", "v"]))
>>> g = (g.map(plt.scatter, "total_bill", "tip", **kws)
... .add_legend())
"Wrap" a column variable with many levels into the rows:
.. plot::
:context: close-figs
>>> attend = sns.load_dataset("attention")
>>> g = sns.FacetGrid(attend, col="subject", col_wrap=5,
... size=1.5, ylim=(0, 10))
>>> g = g.map(sns.pointplot, "solutions", "score", scale=.7)
Define a custom bivariate function to map onto the grid:
.. plot::
:context: close-figs
>>> from scipy import stats
>>> def qqplot(x, y, **kwargs):
... _, xr = stats.probplot(x, fit=False)
... _, yr = stats.probplot(y, fit=False)
... plt.scatter(xr, yr, **kwargs)
>>> g = sns.FacetGrid(tips, col="smoker", hue="sex")
>>> g = (g.map(qqplot, "total_bill", "tip", **kws)
... .add_legend())
Define a custom function that uses a ``DataFrame`` object and accepts
column names as positional variables:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> df = pd.DataFrame(
... data=np.random.randn(90, 4),
... columns=pd.Series(list("ABCD"), name="walk"),
... index=pd.date_range("Jan 1", "March 31", name="date"))
>>> df = df.cumsum(axis=0).stack().reset_index(name="val")
>>> def dateplot(x, y, **kwargs):
... ax = plt.gca()
... data = kwargs.pop("data")
... data.plot(x=x, y=y, ax=ax, grid=False, **kwargs)
>>> g = sns.FacetGrid(df, col="walk", col_wrap=2, size=3.5)
>>> g = g.map_dataframe(dateplot, "date", "val")
Use different axes labels after plotting:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="smoker", row="sex")
>>> g = (g.map(plt.scatter, "total_bill", "tip", color="g", **kws)
... .set_axis_labels("Total bill (US Dollars)", "Tip"))
    Set other attributes that are shared across the facets:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="smoker", row="sex")
>>> g = (g.map(plt.scatter, "total_bill", "tip", color="r", **kws)
... .set(xlim=(0, 60), ylim=(0, 12),
... xticks=[10, 30, 50], yticks=[2, 6, 10]))
Use a different template for the facet titles:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips.sort("size"), col="size", col_wrap=3)
>>> g = (g.map(plt.hist, "tip", bins=np.arange(0, 13), color="c")
... .set_titles("{{col_name}} diners"))
Tighten the facets:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="smoker", row="sex",
... margin_titles=True)
>>> g = (g.map(plt.scatter, "total_bill", "tip", color="m", **kws)
... .set(xlim=(0, 60), ylim=(0, 12),
... xticks=[10, 30, 50], yticks=[2, 6, 10])
... .fig.subplots_adjust(wspace=.05, hspace=.05))
""").format(**_facet_docs)
def facet_data(self):
"""Generator for name indices and data subsets for each facet.
Yields
------
(i, j, k), data_ijk : tuple of ints, DataFrame
The ints provide an index into the {row, col, hue}_names attribute,
and the dataframe contains a subset of the full data corresponding
to each facet. The generator yields subsets that correspond with
the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
is None.
"""
data = self.data
# Construct masks for the row variable
if self._nrow == 1 or self._col_wrap is not None:
row_masks = [np.repeat(True, len(self.data))]
else:
row_masks = [data[self._row_var] == n for n in self.row_names]
# Construct masks for the column variable
if self._ncol == 1:
col_masks = [np.repeat(True, len(self.data))]
else:
col_masks = [data[self._col_var] == n for n in self.col_names]
# Construct masks for the hue variable
if len(self._colors) == 1:
hue_masks = [np.repeat(True, len(self.data))]
else:
hue_masks = [data[self._hue_var] == n for n in self.hue_names]
# Here is the main generator loop
for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
enumerate(col_masks),
enumerate(hue_masks)):
data_ijk = data[row & col & hue & self._not_na]
yield (i, j, k), data_ijk
def map(self, func, *args, **kwargs):
"""Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.tolist():
continue
# Get the current axis
ax = self.facet_axis(row_i, col_j)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = str(self.hue_names[hue_k])
# Get the actual data we are going to plot with
plot_data = data_ijk[list(args)]
if self._dropna:
plot_data = plot_data.dropna()
plot_args = [v for k, v in plot_data.iteritems()]
# Some matplotlib functions don't handle pandas objects correctly
if func.__module__ is not None:
if func.__module__.startswith("matplotlib"):
plot_args = [v.values for v in plot_args]
# Draw the plot
self._facet_plot(func, ax, plot_args, kwargs)
# Finalize the annotations and layout
self._finalize_grid(args[:2])
return self
def map_dataframe(self, func, *args, **kwargs):
"""Like `map` but passes args as strings and inserts data in kwargs.
This method is suitable for plotting with functions that accept a
long-form DataFrame as a `data` keyword argument and access the
data in that DataFrame using string variable names.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. Unlike
the `map` method, a function used here must "understand" Pandas
objects. It also must plot to the currently active matplotlib Axes
and take a `color` keyword argument. If faceting on the `hue`
dimension, it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.tolist():
continue
# Get the current axis
ax = self.facet_axis(row_i, col_j)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = self.hue_names[hue_k]
# Stick the facet dataframe into the kwargs
if self._dropna:
data_ijk = data_ijk.dropna()
kwargs["data"] = data_ijk
# Draw the plot
self._facet_plot(func, ax, args, kwargs)
# Finalize the annotations and layout
self._finalize_grid(args[:2])
return self
def _facet_color(self, hue_index, kw_color):
color = self._colors[hue_index]
if kw_color is not None:
return kw_color
elif color is not None:
return color
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
# Draw the plot
func(*plot_args, **plot_kwargs)
# Sort out the supporting information
self._update_legend_data(ax)
self._clean_axis(ax)
def _finalize_grid(self, axlabels):
"""Finalize the annotations and layout."""
self.set_axis_labels(*axlabels)
self.set_titles()
self.fig.tight_layout()
def facet_axis(self, row_i, col_j):
"""Make the axis identified by these indices active and return it."""
# Calculate the actual indices of the axes to plot on
if self._col_wrap is not None:
ax = self.axes.flat[col_j]
else:
ax = self.axes[row_i, col_j]
# Get a reference to the axes object we want, and make it active
plt.sca(ax)
return ax
def despine(self, **kwargs):
"""Remove axis spines from the facets."""
utils.despine(self.fig, **kwargs)
return self
def set_axis_labels(self, x_var=None, y_var=None):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
self._x_var = x_var
self.set_xlabels(x_var)
if y_var is not None:
self._y_var = y_var
self.set_ylabels(y_var)
return self
def set_xlabels(self, label=None, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = self._x_var
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
return self
def set_ylabels(self, label=None, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = self._y_var
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
return self
def set_xticklabels(self, labels=None, step=None, **kwargs):
"""Set x axis tick labels on the bottom row of the grid."""
for ax in self._bottom_axes:
if labels is None:
labels = [l.get_text() for l in ax.get_xticklabels()]
if step is not None:
xticks = ax.get_xticks()[::step]
labels = labels[::step]
ax.set_xticks(xticks)
ax.set_xticklabels(labels, **kwargs)
return self
def set_yticklabels(self, labels=None, **kwargs):
"""Set y axis tick labels on the left column of the grid."""
for ax in self._left_axes:
if labels is None:
labels = [l.get_text() for l in ax.get_yticklabels()]
ax.set_yticklabels(labels, **kwargs)
return self
def set_titles(self, template=None, row_template=None, col_template=None,
**kwargs):
"""Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for all titles with the formatting keys {col_var} and
{col_name} (if using a `col` faceting variable) and/or {row_var}
and {row_name} (if using a `row` faceting variable).
row_template:
Template for the row variable when titles are drawn on the grid
margins. Must have {row_var} and {row_name} formatting keys.
col_template:
            Template for the column variable when titles are drawn on the grid
margins. Must have {col_var} and {col_name} formatting keys.
Returns
-------
self: object
Returns self.
"""
args = dict(row_var=self._row_var, col_var=self._col_var)
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
# Establish default templates
if row_template is None:
row_template = "{row_var} = {row_name}"
if col_template is None:
col_template = "{col_var} = {col_name}"
if template is None:
if self._row_var is None:
template = col_template
elif self._col_var is None:
template = row_template
else:
template = " | ".join([row_template, col_template])
if self._margin_titles:
if self.row_names is not None:
# Draw the row titles on the right edge of the grid
for i, row_name in enumerate(self.row_names):
ax = self.axes[i, -1]
args.update(dict(row_name=row_name))
title = row_template.format(**args)
bgcolor = self.fig.get_facecolor()
ax.annotate(title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center",
backgroundcolor=bgcolor, **kwargs)
if self.col_names is not None:
# Draw the column titles as normal titles
for j, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = col_template.format(**args)
self.axes[0, j].set_title(title, **kwargs)
return self
# Otherwise title each facet with all the necessary information
if (self._row_var is not None) and (self._col_var is not None):
for i, row_name in enumerate(self.row_names):
for j, col_name in enumerate(self.col_names):
args.update(dict(row_name=row_name, col_name=col_name))
title = template.format(**args)
self.axes[i, j].set_title(title, **kwargs)
elif self.row_names is not None and len(self.row_names):
for i, row_name in enumerate(self.row_names):
args.update(dict(row_name=row_name))
title = template.format(**args)
self.axes[i, 0].set_title(title, **kwargs)
elif self.col_names is not None and len(self.col_names):
for i, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = template.format(**args)
# Index the flat array so col_wrap works
self.axes.flat[i].set_title(title, **kwargs)
return self
@property
def ax(self):
"""Easy access to single axes."""
if self.axes.shape == (1, 1):
return self.axes[0, 0]
else:
raise AttributeError
@property
def _inner_axes(self):
"""Return a flat array of the inner axes."""
if self._col_wrap is None:
return self.axes[:-1, 1:].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
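            # n_empty is the number of unused cells in the last row of the grid
            # when col_wrap does not evenly divide the number of facets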
for i, ax in enumerate(self.axes):
append = (i % self._ncol and
i < (self._ncol * (self._nrow - 1)) and
i < (self._ncol * (self._nrow - 1) - n_empty))
if append:
axes.append(ax)
return np.array(axes, object).flat
@property
def _left_axes(self):
"""Return a flat array of the left column of axes."""
if self._col_wrap is None:
return self.axes[:, 0].flat
else:
axes = []
for i, ax in enumerate(self.axes):
if not i % self._ncol:
axes.append(ax)
return np.array(axes, object).flat
@property
def _not_left_axes(self):
"""Return a flat array of axes that aren't on the left column."""
if self._col_wrap is None:
return self.axes[:, 1:].flat
else:
axes = []
for i, ax in enumerate(self.axes):
if i % self._ncol:
axes.append(ax)
return np.array(axes, object).flat
@property
def _bottom_axes(self):
"""Return a flat array of the bottom row of axes."""
if self._col_wrap is None:
return self.axes[-1, :].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (i >= (self._ncol * (self._nrow - 1)) or
i >= (self._ncol * (self._nrow - 1) - n_empty))
if append:
axes.append(ax)
return np.array(axes, object).flat
@property
def _not_bottom_axes(self):
"""Return a flat array of axes that aren't on the bottom row."""
if self._col_wrap is None:
return self.axes[:-1, :].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (i < (self._ncol * (self._nrow - 1)) and
i < (self._ncol * (self._nrow - 1) - n_empty))
if append:
axes.append(ax)
return np.array(axes, object).flat
class PairGrid(Grid):
"""Subplot grid for plotting pairwise relationships in a dataset."""
def __init__(self, data, hue=None, hue_order=None, palette=None,
hue_kws=None, vars=None, x_vars=None, y_vars=None,
diag_sharey=True, size=2.5, aspect=1,
despine=True, dropna=True):
"""Initialize the plot figure and PairGrid object.
Parameters
----------
data : DataFrame
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : string (variable name), optional
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
vars : list of variable names, optional
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names, optional
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
size : scalar, optional
Height (in inches) of each facet.
aspect : scalar, optional
Aspect * size gives the width (in inches) of each facet.
despine : boolean, optional
Remove the top and right spines from the plots.
dropna : boolean, optional
Drop missing values from the data before plotting.
See Also
--------
pairplot : Easily drawing common uses of :class:`PairGrid`.
FacetGrid : Subplot grid for plotting conditional relationships.
Examples
--------
Draw a scatterplot for each pairwise relationship:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns; sns.set()
>>> iris = sns.load_dataset("iris")
>>> g = sns.PairGrid(iris)
>>> g = g.map(plt.scatter)
Show a univariate distribution on the diagonal:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris)
>>> g = g.map_diag(plt.hist)
>>> g = g.map_offdiag(plt.scatter)
(It's not actually necessary to catch the return value every time,
as it is the same object, but it makes it easier to deal with the
doctests).
Color the points using a categorical variable:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris, hue="species")
>>> g = g.map(plt.scatter)
>>> g = g.add_legend()
Plot a subset of variables
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris, vars=["sepal_length", "sepal_width"])
>>> g = g.map(plt.scatter)
Pass additional keyword arguments to the functions
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris)
>>> g = g.map_diag(plt.hist, edgecolor="w")
>>> g = g.map_offdiag(plt.scatter, edgecolor="w", s=40)
Use different variables for the rows and columns:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris,
... x_vars=["sepal_length", "sepal_width"],
... y_vars=["petal_length", "petal_width"])
>>> g = g.map(plt.scatter)
Use different functions on the upper and lower triangles:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris)
>>> g = g.map_upper(plt.scatter)
>>> g = g.map_lower(sns.kdeplot, cmap="Blues_d")
>>> g = g.map_diag(sns.kdeplot, lw=3, legend=False)
Use different colors and markers for each categorical level:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris, hue="species", palette="Set2",
... hue_kws={"marker": ["o", "s", "D"]})
>>> g = g.map(plt.scatter, linewidths=1, edgecolor="w", s=40)
>>> g = g.add_legend()
"""
# Sort out the variables that define the grid
if vars is not None:
x_vars = list(vars)
y_vars = list(vars)
elif (x_vars is not None) or (y_vars is not None):
if (x_vars is None) or (y_vars is None):
raise ValueError("Must specify `x_vars` and `y_vars`")
else:
numeric_cols = self._find_numeric_cols(data)
x_vars = numeric_cols
y_vars = numeric_cols
if np.isscalar(x_vars):
x_vars = [x_vars]
if np.isscalar(y_vars):
y_vars = [y_vars]
self.x_vars = list(x_vars)
self.y_vars = list(y_vars)
self.square_grid = self.x_vars == self.y_vars
# Create the figure and the array of subplots
figsize = len(x_vars) * size * aspect, len(y_vars) * size
fig, axes = plt.subplots(len(y_vars), len(x_vars),
figsize=figsize,
sharex="col", sharey="row",
squeeze=False)
self.fig = fig
self.axes = axes
self.data = data
# Save what we are going to do with the diagonal
self.diag_sharey = diag_sharey
self.diag_axes = None
# Label the axes
self._add_axis_labels()
# Sort out the hue variable
self._hue_var = hue
if hue is None:
self.hue_names = ["_nolegend_"]
self.hue_vals = pd.Series(["_nolegend_"] * len(data),
index=data.index)
else:
hue_names = utils.categorical_order(data[hue], hue_order)
if dropna:
# Filter NA from the list of unique hue names
hue_names = list(filter(pd.notnull, hue_names))
self.hue_names = hue_names
self.hue_vals = data[hue]
# Additional dict of kwarg -> list of values for mapping the hue var
self.hue_kws = hue_kws if hue_kws is not None else {}
self.palette = self._get_palette(data, hue, hue_order, palette)
self._legend_data = {}
# Make the plot look nice
if despine:
utils.despine(fig=fig)
fig.tight_layout()
def map(self, func, **kwargs):
"""Plot with the same function in every subplot.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes.
"""
kw_color = kwargs.pop("color", None)
for i, y_var in enumerate(self.y_vars):
for j, x_var in enumerate(self.x_vars):
hue_grouped = self.data.groupby(self.hue_vals)
for k, label_k in enumerate(self.hue_names):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.DataFrame(columns=self.data.columns,
dtype=np.float)
ax = self.axes[i, j]
plt.sca(ax)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[k]
color = self.palette[k] if kw_color is None else kw_color
func(data_k[x_var], data_k[y_var],
label=label_k, color=color, **kwargs)
self._clean_axis(ax)
self._update_legend_data(ax)
if kw_color is not None:
kwargs["color"] = kw_color
self._add_axis_labels()
return self
def map_diag(self, func, **kwargs):
"""Plot with a univariate function on each diagonal subplot.
Parameters
----------
func : callable plotting function
Must take an x array as a positional arguments and draw onto the
"currently active" matplotlib Axes. There is a special case when
using a ``hue`` variable and ``plt.hist``; the histogram will be
plotted with stacked bars.
"""
# Add special diagonal axes for the univariate plot
if self.square_grid and self.diag_axes is None:
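            # Overlay an invisible twin axes on each diagonal cell: it shares the
            # column's x axis (and, with diag_sharey, the first diagonal's y axis)
            # so the univariate plot gets its own y scale without disturbing the grid.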
diag_axes = []
for i, (var, ax) in enumerate(zip(self.x_vars,
np.diag(self.axes))):
if i and self.diag_sharey:
diag_ax = ax._make_twin_axes(sharex=ax,
sharey=diag_axes[0],
frameon=False)
else:
diag_ax = ax._make_twin_axes(sharex=ax, frameon=False)
diag_ax.set_axis_off()
diag_axes.append(diag_ax)
self.diag_axes = np.array(diag_axes, np.object)
# Plot on each of the diagonal axes
for i, var in enumerate(self.x_vars):
ax = self.diag_axes[i]
hue_grouped = self.data[var].groupby(self.hue_vals)
# Special-case plt.hist with stacked bars
if func is plt.hist:
plt.sca(ax)
vals = []
for label in self.hue_names:
# Attempt to get data for this level, allowing for empty
try:
vals.append(np.asarray(hue_grouped.get_group(label)))
except KeyError:
vals.append(np.array([]))
func(vals, color=self.palette, histtype="barstacked",
**kwargs)
else:
for k, label_k in enumerate(self.hue_names):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = np.array([])
plt.sca(ax)
func(data_k, label=label_k,
color=self.palette[k], **kwargs)
self._clean_axis(ax)
self._add_axis_labels()
return self
def map_lower(self, func, **kwargs):
"""Plot with a bivariate function on the lower diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes.
"""
kw_color = kwargs.pop("color", None)
for i, j in zip(*np.tril_indices_from(self.axes, -1)):
hue_grouped = self.data.groupby(self.hue_vals)
for k, label_k in enumerate(self.hue_names):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.DataFrame(columns=self.data.columns,
dtype=np.float)
ax = self.axes[i, j]
plt.sca(ax)
x_var = self.x_vars[j]
y_var = self.y_vars[i]
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[k]
color = self.palette[k] if kw_color is None else kw_color
func(data_k[x_var], data_k[y_var], label=label_k,
color=color, **kwargs)
self._clean_axis(ax)
self._update_legend_data(ax)
if kw_color is not None:
kwargs["color"] = kw_color
self._add_axis_labels()
return self
def map_upper(self, func, **kwargs):
"""Plot with a bivariate function on the upper diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes.
"""
kw_color = kwargs.pop("color", None)
for i, j in zip(*np.triu_indices_from(self.axes, 1)):
hue_grouped = self.data.groupby(self.hue_vals)
for k, label_k in enumerate(self.hue_names):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.DataFrame(columns=self.data.columns,
dtype=np.float)
ax = self.axes[i, j]
plt.sca(ax)
x_var = self.x_vars[j]
y_var = self.y_vars[i]
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[k]
color = self.palette[k] if kw_color is None else kw_color
func(data_k[x_var], data_k[y_var], label=label_k,
color=color, **kwargs)
self._clean_axis(ax)
self._update_legend_data(ax)
if kw_color is not None:
kwargs["color"] = kw_color
return self
def map_offdiag(self, func, **kwargs):
"""Plot with a bivariate function on the off-diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes.
"""
self.map_lower(func, **kwargs)
self.map_upper(func, **kwargs)
return self
def _add_axis_labels(self):
"""Add labels to the left and bottom Axes."""
for ax, label in zip(self.axes[-1, :], self.x_vars):
ax.set_xlabel(label)
for ax, label in zip(self.axes[:, 0], self.y_vars):
ax.set_ylabel(label)
def _find_numeric_cols(self, data):
"""Find which variables in a DataFrame are numeric."""
# This can't be the best way to do this, but I do not
# know what the best way might be, so this seems ok
numeric_cols = []
for col in data:
try:
data[col].astype(np.float)
numeric_cols.append(col)
except (ValueError, TypeError):
pass
return numeric_cols
class JointGrid(object):
"""Grid for drawing a bivariate plot with marginal univariate plots."""
def __init__(self, x, y, data=None, size=6, ratio=5, space=.2,
dropna=True, xlim=None, ylim=None):
"""Set up the grid of subplots.
Parameters
----------
x, y : strings or vectors
Data or names of variables in ``data``.
data : DataFrame, optional
DataFrame when ``x`` and ``y`` are variable names.
size : numeric
Size of each side of the figure in inches (it will be square).
ratio : numeric
Ratio of joint axes size to marginal axes height.
space : numeric, optional
Space between the joint and marginal axes
dropna : bool, optional
If True, remove observations that are missing from `x` and `y`.
{x, y}lim : two-tuples, optional
Axis limits to set before plotting.
See Also
--------
jointplot : High-level interface for drawing bivariate plots with
several different default plot kinds.
Examples
--------
Initialize the figure but don't draw any plots onto it:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(style="ticks", color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
Add plots using default parameters:
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot(sns.regplot, sns.distplot)
        Draw the joint and marginal plots separately, which allows finer-grained
        control over other parameters:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot_joint(plt.scatter, color=".5", edgecolor="white")
>>> g = g.plot_marginals(sns.distplot, kde=False, color=".5")
Draw the two marginal plots separately:
.. plot::
:context: close-figs
>>> import numpy as np
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot_joint(plt.scatter, color="m", edgecolor="white")
>>> _ = g.ax_marg_x.hist(tips["total_bill"], color="b", alpha=.6,
... bins=np.arange(0, 60, 5))
>>> _ = g.ax_marg_y.hist(tips["tip"], color="r", alpha=.6,
... orientation="horizontal",
... bins=np.arange(0, 12, 1))
Add an annotation with a statistic summarizing the bivariate
relationship:
.. plot::
:context: close-figs
>>> from scipy import stats
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot_joint(plt.scatter,
... color="g", s=40, edgecolor="white")
>>> g = g.plot_marginals(sns.distplot, kde=False, color="g")
>>> g = g.annotate(stats.pearsonr)
Use a custom function and formatting for the annotation
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot_joint(plt.scatter,
... color="g", s=40, edgecolor="white")
>>> g = g.plot_marginals(sns.distplot, kde=False, color="g")
>>> rsquare = lambda a, b: stats.pearsonr(a, b)[0] ** 2
>>> g = g.annotate(rsquare, template="{stat}: {val:.2f}",
... stat="$R^2$", loc="upper left", fontsize=12)
Remove the space between the joint and marginal axes:
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips, space=0)
>>> g = g.plot_joint(sns.kdeplot, cmap="Blues_d")
>>> g = g.plot_marginals(sns.kdeplot, shade=True)
Draw a smaller plot with relatively larger marginal axes:
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips,
... size=5, ratio=2)
>>> g = g.plot_joint(sns.kdeplot, cmap="Reds_d")
>>> g = g.plot_marginals(sns.kdeplot, color="r", shade=True)
Set limits on the axes:
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips,
... xlim=(0, 50), ylim=(0, 8))
>>> g = g.plot_joint(sns.kdeplot, cmap="Purples_d")
>>> g = g.plot_marginals(sns.kdeplot, color="m", shade=True)
"""
# Set up the subplot grid
f = plt.figure(figsize=(size, size))
gs = plt.GridSpec(ratio + 1, ratio + 1)
ax_joint = f.add_subplot(gs[1:, :-1])
ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)
ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)
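        # The (ratio + 1) x (ratio + 1) GridSpec places the joint axes in the
        # lower-left ratio x ratio block, with one-cell-high/-wide marginal axes
        # along the top and right edges.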
self.fig = f
self.ax_joint = ax_joint
self.ax_marg_x = ax_marg_x
self.ax_marg_y = ax_marg_y
# Turn off tick visibility for the measure axis on the marginal plots
plt.setp(ax_marg_x.get_xticklabels(), visible=False)
plt.setp(ax_marg_y.get_yticklabels(), visible=False)
# Turn off the ticks on the density axis for the marginal plots
plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(), visible=False)
plt.setp(ax_marg_y.get_xticklabels(), visible=False)
ax_marg_x.yaxis.grid(False)
ax_marg_y.xaxis.grid(False)
# Possibly extract the variables from a DataFrame
if data is not None:
if x in data:
x = data[x]
if y in data:
y = data[y]
# Possibly drop NA
if dropna:
not_na = pd.notnull(x) & pd.notnull(y)
x = x[not_na]
y = y[not_na]
# Find the names of the variables
if hasattr(x, "name"):
xlabel = x.name
ax_joint.set_xlabel(xlabel)
if hasattr(y, "name"):
ylabel = y.name
ax_joint.set_ylabel(ylabel)
# Convert the x and y data to arrays for plotting
self.x = np.asarray(x)
self.y = np.asarray(y)
if xlim is not None:
ax_joint.set_xlim(xlim)
if ylim is not None:
ax_joint.set_ylim(ylim)
# Make the grid look nice
utils.despine(f)
utils.despine(ax=ax_marg_x, left=True)
utils.despine(ax=ax_marg_y, bottom=True)
f.tight_layout()
f.subplots_adjust(hspace=space, wspace=space)
def plot(self, joint_func, marginal_func, annot_func=None):
"""Shortcut to draw the full plot.
Use `plot_joint` and `plot_marginals` directly for more control.
Parameters
----------
joint_func, marginal_func: callables
Functions to draw the bivariate and univariate plots.
Returns
-------
self : JointGrid instance
Returns `self`.
"""
self.plot_marginals(marginal_func)
self.plot_joint(joint_func)
if annot_func is not None:
self.annotate(annot_func)
return self
def plot_joint(self, func, **kwargs):
"""Draw a bivariate plot of `x` and `y`.
Parameters
----------
func : plotting callable
This must take two 1d arrays of data as the first two
positional arguments, and it must plot on the "current" axes.
kwargs : key, value mappings
Keyword argument are passed to the plotting function.
Returns
-------
self : JointGrid instance
Returns `self`.
"""
plt.sca(self.ax_joint)
func(self.x, self.y, **kwargs)
return self
def plot_marginals(self, func, **kwargs):
"""Draw univariate plots for `x` and `y` separately.
Parameters
----------
func : plotting callable
This must take a 1d array of data as the first positional
argument, it must plot on the "current" axes, and it must
accept a "vertical" keyword argument to orient the measure
dimension of the plot vertically.
kwargs : key, value mappings
Keyword argument are passed to the plotting function.
Returns
-------
self : JointGrid instance
Returns `self`.
"""
kwargs["vertical"] = False
plt.sca(self.ax_marg_x)
func(self.x, **kwargs)
kwargs["vertical"] = True
plt.sca(self.ax_marg_y)
func(self.y, **kwargs)
return self
def annotate(self, func, template=None, stat=None, loc="best", **kwargs):
"""Annotate the plot with a statistic about the relationship.
Parameters
----------
func : callable
Statistical function that maps the x, y vectors either to (val, p)
or to val.
template : string format template, optional
The template must have the format keys "stat" and "val";
if `func` returns a p value, it should also have the key "p".
stat : string, optional
Name to use for the statistic in the annotation, by default it
uses the name of `func`.
loc : string or int, optional
Matplotlib legend location code; used to place the annotation.
kwargs : key, value mappings
Other keyword arguments are passed to `ax.legend`, which formats
the annotation.
Returns
-------
self : JointGrid instance.
Returns `self`.
"""
default_template = "{stat} = {val:.2g}; p = {p:.2g}"
# Call the function and determine the form of the return value(s)
out = func(self.x, self.y)
try:
val, p = out
except TypeError:
val, p = out, None
default_template, _ = default_template.split(";")
# Set the default template
if template is None:
template = default_template
# Default to name of the function
if stat is None:
stat = func.__name__
# Format the annotation
if p is None:
annotation = template.format(stat=stat, val=val)
else:
annotation = template.format(stat=stat, val=val, p=p)
# Draw an invisible plot and use the legend to draw the annotation
# This is a bit of a hack, but `loc=best` works nicely and is not
# easily abstracted.
phantom, = self.ax_joint.plot(self.x, self.y, linestyle="", alpha=0)
self.ax_joint.legend([phantom], [annotation], loc=loc, **kwargs)
phantom.remove()
return self
def set_axis_labels(self, xlabel="", ylabel="", **kwargs):
"""Set the axis labels on the bivariate axes.
Parameters
----------
xlabel, ylabel : strings
Label names for the x and y variables.
kwargs : key, value mappings
Other keyword arguments are passed to the set_xlabel or
set_ylabel.
Returns
-------
self : JointGrid instance
returns `self`
"""
self.ax_joint.set_xlabel(xlabel, **kwargs)
self.ax_joint.set_ylabel(ylabel, **kwargs)
return self
def savefig(self, *args, **kwargs):
"""Wrap figure.savefig defaulting to tight bounding box."""
kwargs.setdefault("bbox_inches", "tight")
self.fig.savefig(*args, **kwargs)
| bsd-3-clause |
sinhrks/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
# First run
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
plt.contour(Theta0, Theta1, -LML,
levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
lo-co/atm-py | build/lib/atmPy/atmos/atmosphere_standards.py | 3 | 2597 | # -*- coding: utf-8 -*-
"""
This module contains atmospheric constants and standards.
@author: Hagen
"""
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
def standard_atmosphere(value, quantity='altitude', standard='international', return_standard=False):
"""Returns pressure, temperature, and/or altitude as a function of pressure, or altitude for the standard international atmosphere
Arguments
---------
value: float or ndarray.
Depending on the keyword "quantity" this is:
- altitude in meters.
- pressure in mbar.
quantity: 'altitude' or 'pressure'.
        quantity of the argument value.
standard: 'US' or 'international'.
defines which standard is used.
return_standard: bool, optional.
        if True, arguments "value" and "quantity" are ignored and a pandas DataFrame with the standard is returned.
Returns
-------
    tuple of two floats or two ndarrays, depending on the type of value:
        The first quantity in the tuple is pressure in mbar or altitude in meters; the second is temperature in Kelvin.
"""
if standard == 'international':
alt = np.array([-610, 11000, 20000, 32000, 47000, 51000, 71000, 84852]).astype(float)
pressure = np.array([108900, 22632, 5474.9, 868.02, 110.91, 66.939, 3.9564, 0.3734]) / 100.
tmp = np.array([19, -56.5, -56.5, -44.5, -2.5, -2.5, -58.5, -86.28]) + 273.15
elif standard == 'US':
alt = np.array([0, 11000, 20000, 32000, 47000, 51000, 71000]).astype(float)
pressure = np.array([101325, 22632.1, 5474.89, 868.019, 110.906, 66.9389, 3.95642]) / 100.
tmp = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65])
else:
raise TypeError('No standard with the name "%s" is defined' % standard)
if return_standard:
return pd.DataFrame(np.array([alt, pressure, tmp]).transpose(),
columns=['Altitude_meter', 'Pressure_mbar', 'Temperature_K'])
if quantity == 'altitude':
pressure_int = interp1d(alt, np.log(pressure), kind='cubic')
press_n = np.exp(pressure_int(value))
out = press_n
elif quantity == 'pressure':
alt_int = interp1d(np.log(pressure), alt, kind='cubic')
alt_n = alt_int(np.log(value))
out = alt_n
value = alt_n
else:
        raise TypeError('Quantity "%s" is not an option' % quantity)
tmp_int = interp1d(alt, tmp, kind='linear')
tmp_n = tmp_int(value)
return out, tmp_n
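# Illustrative usage sketch (added for clarity; not part of the original module):
#
#     # pressure [mbar] and temperature [K] at 10 km in the international standard atmosphere
#     press, temp = standard_atmosphere(10000., quantity='altitude')
#
#     # altitude [m] and temperature [K] at the 500 mbar level
#     alt, temp = standard_atmosphere(500., quantity='pressure')
#
#     # full lookup table as a pandas DataFrame
#     table = standard_atmosphere(0, return_standard=True)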
| mit |
bthirion/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
the13fools/Bokeh_Examples | plotting/file/burtin.py | 3 | 4722 | import numpy as np
import pandas as pd
from bokeh.plotting import *
from six.moves import cStringIO as StringIO
from math import log, sqrt
from collections import OrderedDict
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics), skiprows=1, skipinitialspace=True)
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
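# a and b define a linear map on sqrt(log(MIC * 1E4)) chosen so that an MIC of 0.001 is
# drawn at the outer radius and an MIC of 1000 at the inner radius; rad() below applies
# this map to arbitrary MIC values.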
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
output_file("burtin.html", title="burtin.py example")
hold()
x = np.zeros(len(df))
y = np.zeros(len(df))
figure(plot_width=width, plot_height=height, title="",
tools="pan,wheel_zoom,box_zoom,reset,previewsave",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color=None,
background_fill="#f0e1d2", border_fill="#f0e1d2")
line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index*big_angle
colors = [gram_color[gram] for gram in df.gram]
annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
annular_wedge(
x, y, inner_radius, rad(df.penicillin), -big_angle+angles + 5*small_angle, -big_angle+angles+6*small_angle, color=drug_color['Penicillin'],
)
annular_wedge(
x, y, inner_radius, rad(df.streptomycin), -big_angle+angles + 3*small_angle, -big_angle+angles+4*small_angle, color=drug_color['Streptomycin'],
)
annular_wedge(
x, y, inner_radius, rad(df.neomycin), -big_angle+angles + 1*small_angle, -big_angle+angles+2*small_angle, color=drug_color['Neomycin'],
)
# circular axes and labels
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
circle(x, y, radius=radii, fill_color=None, line_color="white")
text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]], angle=0, text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
annular_wedge(
x, y, inner_radius-10, outer_radius+10, -big_angle+angles, -big_angle+angles, color="black",
)
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
text(xr, yr, df.bacteria, angle=label_angle, text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
text([-30, -30], [-370, -390], text=["Gram-" + x for x in gram_color.keys()], angle=0, text_font_size="7pt", text_align="left", text_baseline="middle")
rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()), angle=0, text_font_size="9pt", text_align="left", text_baseline="middle")
xgrid().grid_line_color = None
ygrid().grid_line_color = None
show()
| bsd-3-clause |
cdek11/PLS | PLS_Algorithm_Optimized.py | 2 | 5817 |
# coding: utf-8
# In[2]:
# Code to implement the optimized version of the PLS Algorithm
import pandas as pd
import numpy as np
import numba
from numba import jit
@jit
def mean_center_scale(dataframe):
'''Scale dataframe by subtracting mean and dividing by standard deviation'''
dataframe = dataframe - dataframe.mean()
dataframe = dataframe/dataframe.std()
return dataframe
@jit
def y_pred(Y_pred, i,b_dictionary,t_hat_dictionary,q_new_dictionary):
'''Find prediction for Y based on the number of components in this iteration'''
for j in range(1,i+1):
Y_pred = Y_pred + (b_dictionary[j]*t_hat_dictionary[j]).dot(q_new_dictionary[j].T)
return Y_pred
@jit
def rmse(i,Y_true, Y_pred, response_std, RMSE_dictionary):
    '''Compute the RMSE and rescale it to the original response units'''
RMSE = np.sqrt(sum((Y_true - Y_pred)**2)/Y_true.shape[0])
RMSE_scaled = RMSE * response_std
RMSE_dictionary[i] = RMSE_scaled
return RMSE_dictionary
@jit
def core_pls(i,Y, X, q_new_dictionary, b_dictionary, t_hat_dictionary) :
'''Core PLS algorithm'''
#Here we have one variable in the Y block so q = 1
#and omit steps 5-8
q = 1
#For the X block, u = Y
u = Y #random y column from Y #Step 1
w_old = np.dot(u.T,X)/np.dot(u.T,u) #Step 2
w_new = w_old/np.linalg.norm(w_old) #Step 3
t = np.dot(X,w_new.T)/np.dot(w_new,w_new.T) #Step 4
#For the Y block can be omitted if Y only has one variable
q_old = np.dot(t.T,Y)/np.dot(t.T,t) #Step 5
q_new = q_old/np.linalg.norm(q_old) #Step 6
q_new_dictionary[i] = q_new
u = np.dot(Y,q_new.T)/np.dot(q_new,q_new.T) #Step 7
#Step 8: Check convergence
#Calculate the X loadings and rescale the scores and weights accordingly
p = np.dot(t.T,X)/np.dot(t.T,t) #Step 9
p_new = p.T/np.linalg.norm(p.T) #Step 10
t_new = t/np.linalg.norm(p.T) #Step 11
w_new = w_old/np.linalg.norm(p) #Step 12
#Find the regression coefficient for b for th inner relation
b = np.dot(u.T,t_new)/np.dot(t.T,t) #Step 13
b_dictionary[i] = b
#Calculation of the residuals
E_h = X - np.dot(t_new,p_new.T)
    F_h = Y - b.dot(t_new.T).T.dot(q)  # TODO: verify this residual update for the Y block
#Set outer relation for the X block
#Xres_dictionary[i] = E_h #MAYBE REMOVE
X = E_h
#Set the mixed relation for the Y block
    #Yres_dictionary[i] = F_h #MAYBE REMOVE
Y = F_h
#Find estimated t hat
t_hat = np.dot(E_h,w_new.T)
t_hat_dictionary[i] = t_hat
E_h = E_h - np.dot(t_hat,p_new.T)
return X,Y, u, w_new, q_new, t_new, p_new, q_new_dictionary, t_hat_dictionary, b_dictionary,E_h, F_h
def pls_optimized(path, path_test, predictors, response):
    '''Run partial least squares on the numeric predictors of a dataframe for a numeric response.
    Returns dictionaries of training and test RMSE keyed by the number of components.'''
###TRAINING DATA
combined = predictors
#Load data
data = pd.DataFrame.from_csv(path)
combined.append(response)
data = data[combined]
response_std = data[response].std()
#Subtract the mean and scale each column
data = mean_center_scale(data)
    #Separate into design matrix (X block) and response column vector (Y block)
predictors.pop()
X = data[predictors].as_matrix()
Y = data[[response]].as_matrix()
Y_true = Y #For prediction
#Get rank of matrix
rank = np.linalg.matrix_rank(X)
u = Y #set initial u as Y
Xres_dictionary = {}
Yres_dictionary = {}
q_new_dictionary ={}
b_dictionary = {}
t_hat_dictionary = {}
t_hat_train_dictionary = {}
t_hat_test_dictionary = {}
RMSE_dictionary = {}
RMSE_test_dictionary = {}
###TEST DATA
#Load data
data_test = pd.DataFrame.from_csv(path_test)
combined.append(response)
data_test = data_test[combined]
response_std_test = data_test[response].std()
#Subtract the mean and scale each column
data_test = mean_center_scale(data_test)
    #Separate into design matrix (X block) and response column vector (Y block)
    predictors.pop()
    X_test = data_test[predictors].as_matrix()
    Y_test = data_test[[response]].as_matrix()
Y_true_test = Y_test #For prediction
#Get rank of matrix
rank_test = np.linalg.matrix_rank(X_test)
#Iterate through each component
for i in range(1,(rank+1)):
Y_pred = np.zeros((Y_true.shape[0],1))
Y_pred_test = np.zeros((Y_true_test.shape[0],1))
#Core algo
X,Y, u, w_new, q_new, t_new, p_new, q_new_dictionary, t_hat_dictionary, b_dictionary,E_h, F_h = core_pls(i,Y, X, q_new_dictionary, b_dictionary, t_hat_dictionary)
        #Sum over different components to build the training prediction
for g in range(1,i+1):
t_hat_train = np.dot(E_h,w_new.T)
t_hat_train_dictionary[g] = t_hat_train
E_h = E_h - np.dot(t_hat_train, p_new.T)
Y_pred = y_pred(Y_pred, g,b_dictionary,t_hat_dictionary,q_new_dictionary)
#Find training RMSE
RMSE_dictionary = rmse(i,Y_true, Y_pred, response_std, RMSE_dictionary)
#Set initial E_h as X_test data
E_h_test = X_test
        #Sum over different components to build the test prediction
for k in range(1,i+1):
t_hat_test = np.dot(E_h_test,w_new.T)
t_hat_test_dictionary[k] = t_hat_test
E_h_test = E_h_test - np.dot(t_hat_test, p_new.T)
Y_pred_test = y_pred(Y_pred_test, k,b_dictionary,t_hat_test_dictionary,q_new_dictionary)
#Find test RMSE
RMSE_test_dictionary = rmse(i,Y_true_test, Y_pred_test, response_std_test, RMSE_test_dictionary)
return RMSE_dictionary, RMSE_test_dictionary
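# Illustrative usage sketch (file names and column names below are hypothetical):
#
#     train_rmse, test_rmse = pls_optimized(
#         "train_data.csv", "test_data.csv",
#         predictors=["x1", "x2", "x3"], response="y")
#     # both dictionaries map the number of PLS components to an RMSE scaled back
#     # to the original response units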
| mit |
lukeshingles/artistools | artistools/writebollightcurvedata.py | 1 | 3110 | #!/usr/bin/env python3
import artistools as at
import artistools.spectra
from pathlib import Path
import numpy as np
import pandas as pd
from astropy import units as u
def get_bol_lc_from_spec(modelpath):
res_specdata = at.spectra.read_specpol_res(modelpath)
# print(res_specdata)
timearray = res_specdata[0].columns.values[1:]
times = [time for time in timearray if 5 < float(time) < 80]
lightcurvedata = {'time': times}
for angle in range(len(res_specdata)):
bol_luminosity = []
for timestep, time in enumerate(timearray):
time = float(time)
if 5 < time < 80:
spectrum = at.spectra.get_res_spectrum(modelpath, timestep, timestep, angle=angle,
res_specdata=res_specdata)
integrated_flux = np.trapz(spectrum['f_lambda'], spectrum['lambda_angstroms'])
integrated_luminosity = integrated_flux * 4 * np.pi * np.power(u.Mpc.to('cm'), 2)
bol_luminosity.append(integrated_luminosity)
lightcurvedata[f'angle={angle}'] = np.log10(bol_luminosity)
lightcurvedataframe = pd.DataFrame(lightcurvedata)
lightcurvedataframe = lightcurvedataframe.replace([np.inf, -np.inf], 0)
print(lightcurvedataframe)
return lightcurvedataframe
def get_bol_lc_from_lightcurveout(modelpath):
lcdata = pd.read_csv(modelpath / "light_curve_res.out", delim_whitespace=True, header=None, names=['time', 'lum', 'lum_cmf'])
lcdataframes = at.gather_res_data(lcdata, index_of_repeated_value=0)
times = lcdataframes[0]['time']
lightcurvedata = {'time': times}
for angle in range(len(lcdataframes)):
lcdata = lcdataframes[angle]
        bol_luminosity = np.array(lcdata['lum']) * 3.826e33  # convert from L_sun to erg/s (L_sun = 3.826e33 erg/s)
lightcurvedata[f'angle={angle}'] = np.log10(bol_luminosity)
lightcurvedataframe = pd.DataFrame(lightcurvedata)
lightcurvedataframe = lightcurvedataframe.replace([np.inf, -np.inf], 0)
print(lightcurvedataframe)
return lightcurvedataframe
# modelnames = ['M08_03', 'M08_05', 'M08_10', 'M09_03', 'M09_05', 'M09_10',
# 'M10_02_end55', 'M10_03', 'M10_05', 'M10_10', 'M11_05_1']
modelnames = ['M2a']
for modelname in modelnames:
# modelpath = Path("/Users/ccollins/harddrive4TB/parameterstudy") / Path(modelname)
modelpath = Path("/Users/ccollins/harddrive4TB/Gronow2020") / Path(modelname)
outfilepath = Path("/Users/ccollins/Desktop/bollightcurvedata")
# lightcurvedataframe = get_bol_lc_from_spec(modelpath)
lightcurvedataframe = get_bol_lc_from_lightcurveout(modelpath)
lightcurvedataframe.to_csv(outfilepath / f"bol_lightcurvedata_{modelname}.txt", sep=' ', index=False, header=False)
with open(outfilepath / f"bol_lightcurvedata_{modelname}.txt", 'r+') as f: # add comment to start of file
content = f.read()
f.seek(0, 0)
f.write("# 1st col is time in days. Next columns are log10(luminosity) for each model viewing angle".rstrip('\r\n')
+ '\n' + content)
print("done")
| mit |
wkal/brain4k | setup.py | 2 | 1067 | from distutils.core import setup
from setuptools import find_packages
setup(
name = 'brain4k',
packages = find_packages(),
include_package_data = True,
version = '0.1',
description = 'A framework for machine learning pipelines',
author = 'Robert Kyle',
author_email = '[email protected]',
url = 'https://github.com/shuggiefisher/brain4k',
download_url = 'https://github.com/shuggiefisher/brain4k/tarball/0.1',
keywords = ['machine', 'learning', 'pipeline', 'deep', 'neural', 'network'],
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires = ['numpy', 'pandas', 'h5py', 'jinja2'],
entry_points = {
'console_scripts': ['brain4k = brain4k.brain4k:run'],
},
) | apache-2.0 |
wlamond/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
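# Note: the (row, col) indices above are drawn independently, so duplicates are possible
# and are summed when converting to dense/CSR; the matrices therefore contain at most
# n_nonzeros non-zero entries.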
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
leggitta/mne-python | doc/conf.py | 7 | 9408 | # -*- coding: utf-8 -*-
#
# MNE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import os.path as op
from datetime import date
import sphinxgallery
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = op.dirname(__file__)
sys.path.append(op.abspath(op.join(curdir, '..', 'mne')))
sys.path.append(op.abspath(op.join(curdir, 'sphinxext')))
import mne
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
import numpy_ext.numpydoc
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'numpy_ext.numpydoc',
# 'sphinx.ext.intersphinx',
# 'flow_diagram',
'sphinxgallery.gen_gallery']
autosummary_generate = True
autodoc_default_flags = ['inherited-members']
# extensions = ['sphinx.ext.autodoc',
# 'sphinx.ext.doctest',
# 'sphinx.ext.todo',
# 'sphinx.ext.pngmath',
# 'sphinx.ext.inheritance_diagram',
# 'numpydoc',
# 'ipython_console_highlighting',
# 'only_directives']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MNE'
copyright = u'2012-%s, MNE Developers' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = ['config_doc.rst']
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
exclude_patterns = ['source/generated']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': ' ',
'source_link_position': "footer",
'bootswatch_theme': "flatly",
'navbar_sidebarrel': False,
'bootstrap_version': "3",
'navbar_links': [("Tutorials", "tutorials"),
("Gallery", "auto_examples/index"),
("Manual", "manual/index"),
("API", "python_reference"),
("FAQ", "faq"),
("Cite", "cite"),
],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_images', sphinxgallery.glr_path_static()]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# variables to pass to HTML templating engine
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True, 'use_twitter': True,
'use_media_buttons': True, 'build_dev_html': build_dev_html}
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
# ('index', 'MNE.tex', u'MNE Manual',
# u'MNE Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
trim_doctests_flags = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
sphinxgallery_conf = {
'examples_dirs' : ['../examples', '../tutorials'],
'gallery_dirs' : ['auto_examples', 'auto_tutorials'],
'doc_module': ('sphinxgallery', 'numpy'),
'reference_url': {
'mne': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'mayavi': 'http://docs.enthought.com/mayavi/mayavi'},
'find_mayavi_figures': True,
'default_thumb_file': '_static/mne_helmet.png',
}
| bsd-3-clause |
datapythonista/pandas | pandas/io/common.py | 3 | 31255 | """Common IO api utilities"""
from __future__ import annotations
import bz2
import codecs
from collections import abc
import dataclasses
import gzip
from io import (
BufferedIOBase,
BytesIO,
RawIOBase,
StringIO,
TextIOWrapper,
)
import mmap
import os
from typing import (
IO,
Any,
AnyStr,
Mapping,
cast,
)
from urllib.parse import (
urljoin,
urlparse as parse_url,
uses_netloc,
uses_params,
uses_relative,
)
import warnings
import zipfile
from pandas._typing import (
Buffer,
CompressionDict,
CompressionOptions,
FileOrBuffer,
FilePathOrBuffer,
StorageOptions,
)
from pandas.compat import (
get_lzma_file,
import_lzma,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import is_file_like
lzma = import_lzma()
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard("")
@dataclasses.dataclass
class IOArgs:
"""
Return value of io/common.py:_get_filepath_or_buffer.
    Note (copied from io/parsers):
filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
though mypy handling of conditional imports is difficult.
See https://github.com/python/mypy/issues/1297
"""
filepath_or_buffer: FileOrBuffer
encoding: str
mode: str
compression: CompressionDict
should_close: bool = False
@dataclasses.dataclass
class IOHandles:
"""
Return value of io/common.py:get_handle
Can be used as a context manager.
This is used to easily close created buffers and to handle corner cases when
TextIOWrapper is inserted.
handle: The file handle to be used.
created_handles: All file handles that are created by get_handle
is_wrapped: Whether a TextIOWrapper needs to be detached.
"""
handle: Buffer
compression: CompressionDict
created_handles: list[Buffer] = dataclasses.field(default_factory=list)
is_wrapped: bool = False
is_mmap: bool = False
def close(self) -> None:
"""
Close all created buffers.
Note: If a TextIOWrapper was inserted, it is flushed and detached to
avoid closing the potentially user-created buffer.
"""
if self.is_wrapped:
assert isinstance(self.handle, TextIOWrapper)
self.handle.flush()
self.handle.detach()
self.created_handles.remove(self.handle)
try:
for handle in self.created_handles:
handle.close()
except (OSError, ValueError):
pass
self.created_handles = []
self.is_wrapped = False
def __enter__(self) -> IOHandles:
return self
def __exit__(self, *args: Any) -> None:
self.close()
def is_url(url) -> bool:
"""
Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
if not isinstance(url, str):
return False
return parse_url(url).scheme in _VALID_URLS
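# For example, is_url("https://example.com/data.csv") is True, while a bare local path
# such as is_url("data/data.csv") is False because it has no recognised URL scheme.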
def _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]:
"""
Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, str):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def validate_header_arg(header) -> None:
if isinstance(header, bool):
raise TypeError(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names"
)
def stringify_path(
filepath_or_buffer: FilePathOrBuffer[AnyStr],
convert_file_like: bool = False,
) -> FileOrBuffer[AnyStr]:
"""
Attempt to convert a path-like object to a string.
Parameters
----------
filepath_or_buffer : object to be converted
Returns
-------
str_filepath_or_buffer : maybe a string version of the object
Notes
-----
Objects supporting the fspath protocol (python 3.6+) are coerced
according to its __fspath__ method.
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
if not convert_file_like and is_file_like(filepath_or_buffer):
# GH 38125: some fsspec objects implement os.PathLike but have already opened a
# file. This prevents opening the file a second time. infer_compression calls
# this function with convert_file_like=True to infer the compression.
return cast(FileOrBuffer[AnyStr], filepath_or_buffer)
if isinstance(filepath_or_buffer, os.PathLike):
filepath_or_buffer = filepath_or_buffer.__fspath__()
return _expand_user(filepath_or_buffer)
def urlopen(*args, **kwargs):
"""
Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
the stdlib.
"""
import urllib.request
return urllib.request.urlopen(*args, **kwargs)
def is_fsspec_url(url: FilePathOrBuffer) -> bool:
"""
Returns true if the given URL looks like
something fsspec can handle
"""
return (
isinstance(url, str)
and "://" in url
and not url.startswith(("http://", "https://"))
)
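# For example, "s3://bucket/key.csv" and "gcs://bucket/key.csv" look like fsspec URLs,
# whereas "https://example.com/key.csv" and "local/path.csv" do not.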
def _get_filepath_or_buffer(
filepath_or_buffer: FilePathOrBuffer,
encoding: str = "utf-8",
compression: CompressionOptions = None,
mode: str = "r",
storage_options: StorageOptions = None,
) -> IOArgs:
"""
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optional
encoding : the encoding to use to decode bytes, default is 'utf-8'
mode : str, optional
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values
.. versionadded:: 1.2.0
    .. versionchanged:: 1.2.0
Returns the dataclass IOArgs.
"""
filepath_or_buffer = stringify_path(filepath_or_buffer)
# handle compression dict
compression_method, compression = get_compression_method(compression)
compression_method = infer_compression(filepath_or_buffer, compression_method)
# GH21227 internal compression is not used for non-binary handles.
if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
warnings.warn(
"compression has no effect when passing a non-binary object as input.",
RuntimeWarning,
stacklevel=2,
)
compression_method = None
compression = dict(compression, method=compression_method)
# uniform encoding names
if encoding is not None:
encoding = encoding.replace("_", "-").lower()
# bz2 and xz do not write the byte order mark for utf-16 and utf-32
# print a warning when writing such files
if (
"w" in mode
and compression_method in ["bz2", "xz"]
and encoding in ["utf-16", "utf-32"]
):
warnings.warn(
f"{compression} will not write the byte order mark for {encoding}",
UnicodeWarning,
)
# Use binary mode when converting path-like objects to file-like objects (fsspec)
# except when text mode is explicitly requested. The original mode is returned if
# fsspec is not used.
fsspec_mode = mode
if "t" not in fsspec_mode and "b" not in fsspec_mode:
fsspec_mode += "b"
if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
# TODO: fsspec can also handle HTTP via requests, but leaving this
# unchanged. using fsspec appears to break the ability to infer if the
# server responded with gzipped data
storage_options = storage_options or {}
# waiting until now for importing to match intended lazy logic of
# urlopen function defined elsewhere in this module
import urllib.request
# assuming storage_options is to be interpreted as headers
req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
with urlopen(req_info) as req:
content_encoding = req.headers.get("Content-Encoding", None)
if content_encoding == "gzip":
# Override compression based on Content-Encoding header
compression = {"method": "gzip"}
reader = BytesIO(req.read())
return IOArgs(
filepath_or_buffer=reader,
encoding=encoding,
compression=compression,
should_close=True,
mode=fsspec_mode,
)
if is_fsspec_url(filepath_or_buffer):
assert isinstance(
filepath_or_buffer, str
) # just to appease mypy for this branch
# two special-case s3-like protocols; these have special meaning in Hadoop,
# but are equivalent to just "s3" from fsspec's point of view
# cc #11071
if filepath_or_buffer.startswith("s3a://"):
filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
if filepath_or_buffer.startswith("s3n://"):
filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
fsspec = import_optional_dependency("fsspec")
# If botocore is installed we fallback to reading with anon=True
# to allow reads from public buckets
err_types_to_retry_with_anon: list[Any] = []
try:
import_optional_dependency("botocore")
from botocore.exceptions import (
ClientError,
NoCredentialsError,
)
err_types_to_retry_with_anon = [
ClientError,
NoCredentialsError,
PermissionError,
]
except ImportError:
pass
try:
file_obj = fsspec.open(
filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
).open()
# GH 34626 Reads from Public Buckets without Credentials needs anon=True
except tuple(err_types_to_retry_with_anon):
if storage_options is None:
storage_options = {"anon": True}
else:
# don't mutate user input.
storage_options = dict(storage_options)
storage_options["anon"] = True
file_obj = fsspec.open(
filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
).open()
return IOArgs(
filepath_or_buffer=file_obj,
encoding=encoding,
compression=compression,
should_close=True,
mode=fsspec_mode,
)
elif storage_options:
raise ValueError(
"storage_options passed with file object or non-fsspec file path"
)
if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
return IOArgs(
filepath_or_buffer=_expand_user(filepath_or_buffer),
encoding=encoding,
compression=compression,
should_close=False,
mode=mode,
)
if not is_file_like(filepath_or_buffer):
msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
raise ValueError(msg)
return IOArgs(
filepath_or_buffer=filepath_or_buffer,
encoding=encoding,
compression=compression,
should_close=False,
mode=mode,
)
def file_path_to_url(path: str) -> str:
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
# lazify expensive import (~30ms)
from urllib.request import pathname2url
return urljoin("file:", pathname2url(path))
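# For example, on a POSIX system file_path_to_url("/tmp/data.csv") returns
# "file:///tmp/data.csv"; on Windows, pathname2url also handles drive letters and
# backslashes.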
_compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}
def get_compression_method(
compression: CompressionOptions,
) -> tuple[str | None, CompressionDict]:
"""
Simplifies a compression argument to a compression method string and
a mapping containing additional arguments.
Parameters
----------
compression : str or mapping
If string, specifies the compression method. If mapping, value at key
'method' specifies compression method.
Returns
-------
    tuple of (compression method, compression arguments), i.e.
    (Optional[str], Dict[str, Any])
Raises
------
ValueError on mapping missing 'method' key
"""
compression_method: str | None
if isinstance(compression, Mapping):
compression_args = dict(compression)
try:
compression_method = compression_args.pop("method")
except KeyError as err:
raise ValueError("If mapping, compression must have key 'method'") from err
else:
compression_args = {}
compression_method = compression
return compression_method, compression_args
def infer_compression(
filepath_or_buffer: FilePathOrBuffer, compression: str | None
) -> str | None:
"""
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
compression method is returned unchanged, unless it's invalid, in which
case an error is raised.
Parameters
----------
filepath_or_buffer : str or file handle
File path or object.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
If 'infer' and `filepath_or_buffer` is path-like, then detect
compression from the following extensions: '.gz', '.bz2', '.zip',
or '.xz' (otherwise no compression).
Returns
-------
string or None
Raises
------
ValueError on invalid compression specified.
"""
if compression is None:
return None
# Infer compression
if compression == "infer":
# Convert all path types (e.g. pathlib.Path) to strings
filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
if not isinstance(filepath_or_buffer, str):
# Cannot infer compression of a buffer, assume no compression
return None
# Infer compression from the filename/URL extension
for compression, extension in _compression_to_extension.items():
if filepath_or_buffer.lower().endswith(extension):
return compression
return None
# Compression has been specified. Check that it's valid
if compression in _compression_to_extension:
return compression
# https://github.com/python/mypy/issues/5492
# Unsupported operand types for + ("List[Optional[str]]" and "List[str]")
valid = ["infer", None] + sorted(
_compression_to_extension
) # type: ignore[operator]
msg = (
f"Unrecognized compression type: {compression}\n"
f"Valid compression types are {valid}"
)
raise ValueError(msg)
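# For example, infer_compression("data.csv.gz", "infer") returns "gzip",
# infer_compression("data.csv", "infer") returns None, and an unrecognized method such
# as infer_compression("data.csv", "rar") raises ValueError.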
def get_handle(
path_or_buf: FilePathOrBuffer,
mode: str,
encoding: str | None = None,
compression: CompressionOptions = None,
memory_map: bool = False,
is_text: bool = True,
errors: str | None = None,
storage_options: StorageOptions = None,
) -> IOHandles:
"""
Get file handle for given path/buffer and mode.
Parameters
----------
path_or_buf : str or file handle
File path or object.
mode : str
Mode to open path_or_buf with.
encoding : str or None
Encoding to use.
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {'infer',
'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{'zip', 'gzip', 'bz2'}, or inferred as one of the above,
other entries passed as additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other keys as compression options if compression
mode is 'zip'.
.. versionchanged:: 1.1.0
Passing compression options as keys in dict is now
supported for compression modes 'gzip' and 'bz2' as well as 'zip'.
memory_map : bool, default False
See parsers._parser_params for more information.
is_text : bool, default True
Whether the type of the content passed to the file/buffer is string or
bytes. This is not the same as `"b" not in mode`. If a string content is
passed to a binary file/buffer, a wrapper is inserted.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
storage_options: StorageOptions = None
Passed to _get_filepath_or_buffer
.. versionchanged:: 1.2.0
Returns the dataclass IOHandles
"""
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
encoding = encoding or "utf-8"
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
mode += "b"
    # validate errors
if isinstance(errors, str):
errors = errors.lower()
if errors not in (
None,
"strict",
"ignore",
"replace",
"xmlcharrefreplace",
"backslashreplace",
"namereplace",
"surrogateescape",
"surrogatepass",
):
raise ValueError(
f"Invalid value for `encoding_errors` ({errors}). Please see "
+ "https://docs.python.org/3/library/codecs.html#error-handlers "
+ "for valid values."
)
# open URLs
ioargs = _get_filepath_or_buffer(
path_or_buf,
encoding=encoding,
compression=compression,
mode=mode,
storage_options=storage_options,
)
handle = ioargs.filepath_or_buffer
handles: list[Buffer]
# memory mapping needs to be the first step
handle, memory_map, handles = _maybe_memory_map(
handle,
memory_map,
ioargs.encoding,
ioargs.mode,
errors,
ioargs.compression["method"] not in _compression_to_extension,
)
is_path = isinstance(handle, str)
compression_args = dict(ioargs.compression)
compression = compression_args.pop("method")
if compression:
# compression libraries do not like an explicit text-mode
ioargs.mode = ioargs.mode.replace("t", "")
# GZ Compression
if compression == "gzip":
if is_path:
assert isinstance(handle, str)
handle = gzip.GzipFile(
filename=handle,
mode=ioargs.mode,
**compression_args,
)
else:
handle = gzip.GzipFile(
# error: Argument "fileobj" to "GzipFile" has incompatible type
# "Union[str, Union[IO[Any], RawIOBase, BufferedIOBase, TextIOBase,
# TextIOWrapper, mmap]]"; expected "Optional[IO[bytes]]"
fileobj=handle, # type: ignore[arg-type]
mode=ioargs.mode,
**compression_args,
)
# BZ Compression
elif compression == "bz2":
handle = bz2.BZ2File(
# Argument 1 to "BZ2File" has incompatible type "Union[str,
# Union[IO[Any], RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper,
# mmap]]"; expected "Union[Union[str, bytes, _PathLike[str],
# _PathLike[bytes]], IO[bytes]]"
handle, # type: ignore[arg-type]
mode=ioargs.mode,
**compression_args,
)
# ZIP Compression
elif compression == "zip":
handle = _BytesZipFile(handle, ioargs.mode, **compression_args)
if handle.mode == "r":
handles.append(handle)
zip_names = handle.namelist()
if len(zip_names) == 1:
handle = handle.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
raise ValueError(
"Multiple files found in ZIP file. "
f"Only one file per ZIP: {zip_names}"
)
# XZ Compression
elif compression == "xz":
handle = get_lzma_file(lzma)(handle, ioargs.mode)
# Unrecognized Compression
else:
msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
assert not isinstance(handle, str)
handles.append(handle)
elif isinstance(handle, str):
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
if ioargs.encoding and "b" not in ioargs.mode:
# Encoding
handle = open(
handle,
ioargs.mode,
encoding=ioargs.encoding,
errors=errors,
newline="",
)
else:
# Binary mode
handle = open(handle, ioargs.mode)
handles.append(handle)
# Convert BytesIO or file objects passed with an encoding
is_wrapped = False
if is_text and (compression or _is_binary_mode(handle, ioargs.mode)):
handle = TextIOWrapper(
# error: Argument 1 to "TextIOWrapper" has incompatible type
# "Union[IO[bytes], IO[Any], RawIOBase, BufferedIOBase, TextIOBase, mmap]";
# expected "IO[bytes]"
handle, # type: ignore[arg-type]
encoding=ioargs.encoding,
errors=errors,
newline="",
)
handles.append(handle)
# only marked as wrapped when the caller provided a handle
is_wrapped = not (
isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
)
handles.reverse() # close the most recently added buffer first
if ioargs.should_close:
assert not isinstance(ioargs.filepath_or_buffer, str)
handles.append(ioargs.filepath_or_buffer)
assert not isinstance(handle, str)
return IOHandles(
handle=handle,
created_handles=handles,
is_wrapped=is_wrapped,
is_mmap=memory_map,
compression=ioargs.compression,
)
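# --- Illustrative usage sketch (not part of the original module) ---
# The IOHandles returned above is consumed by the pandas readers/writers
# roughly as follows; the file name, the compression argument and the
# assumption that this helper is pandas' ``get_handle`` are for illustration
# only.
#
#   handles = get_handle("data.csv.gz", "r", compression="infer")
#   try:
#       for line in handles.handle:
#           pass  # parse each decompressed, decoded line
#   finally:
#       handles.close()  # closes every buffer recorded in created_handles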
# error: Definition of "__exit__" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "BinaryIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "IO" [misc]
# error: Definition of "read" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "read" in base class "ZipFile" is incompatible with
# definition in base class "IO" [misc]
class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc]
"""
Wrapper for standard library class ZipFile and allow the returned file-like
handle to accept byte strings via `write` method.
BytesIO provides attributes of file-like object and ZipFile.writestr writes
bytes strings into a member of the archive.
"""
# GH 17778
def __init__(
self,
file: FilePathOrBuffer,
mode: str,
archive_name: str | None = None,
**kwargs,
):
mode = mode.replace("b", "")
self.archive_name = archive_name
self.multiple_write_buffer: StringIO | BytesIO | None = None
kwargs_zip: dict[str, Any] = {"compression": zipfile.ZIP_DEFLATED}
kwargs_zip.update(kwargs)
# error: Argument 1 to "__init__" of "ZipFile" has incompatible type
# "Union[_PathLike[str], Union[str, Union[IO[Any], RawIOBase, BufferedIOBase,
# TextIOBase, TextIOWrapper, mmap]]]"; expected "Union[Union[str,
# _PathLike[str]], IO[bytes]]"
super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type]
def write(self, data):
# buffer multiple write calls, write on flush
if self.multiple_write_buffer is None:
self.multiple_write_buffer = (
BytesIO() if isinstance(data, bytes) else StringIO()
)
self.multiple_write_buffer.write(data)
def flush(self) -> None:
# write to actual handle and close write buffer
if self.multiple_write_buffer is None or self.multiple_write_buffer.closed:
return
# ZipFile needs a non-empty string
archive_name = self.archive_name or self.filename or "zip"
with self.multiple_write_buffer:
super().writestr(archive_name, self.multiple_write_buffer.getvalue())
def close(self):
self.flush()
super().close()
@property
def closed(self):
return self.fp is None
class _MMapWrapper(abc.Iterator):
"""
Wrapper for the Python's mmap class so that it can be properly read in
by Python's csv.reader class.
Parameters
----------
f : file object
File object to be mapped onto memory. Must support the 'fileno'
method or have an equivalent attribute
"""
def __init__(
self,
f: IO,
encoding: str = "utf-8",
errors: str = "strict",
decode: bool = True,
):
self.encoding = encoding
self.errors = errors
self.decoder = codecs.getincrementaldecoder(encoding)(errors=errors)
self.decode = decode
self.attributes = {}
        # io.IOBase exposes "writable" (not "writeable"); using the correct
        # name ensures the hasattr check below can actually record it.
        for attribute in ("seekable", "readable", "writable"):
if not hasattr(f, attribute):
continue
self.attributes[attribute] = getattr(f, attribute)()
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name: str):
if name in self.attributes:
return lambda: self.attributes[name]
return getattr(self.mmap, name)
def __iter__(self) -> _MMapWrapper:
return self
def read(self, size: int = -1) -> str | bytes:
# CSV c-engine uses read instead of iterating
content: bytes = self.mmap.read(size)
if self.decode:
# memory mapping is applied before compression. Encoding should
# be applied to the de-compressed data.
return content.decode(self.encoding, errors=self.errors)
return content
def __next__(self) -> str:
newbytes = self.mmap.readline()
# readline returns bytes, not str, but Python's CSV reader
# expects str, so convert the output to str before continuing
newline = self.decoder.decode(newbytes)
# mmap doesn't raise if reading past the allocated
# data but instead returns an empty string, so raise
# if that is returned
if newline == "":
raise StopIteration
# IncrementalDecoder seems to push newline to the next line
return newline.lstrip("\n")
def _maybe_memory_map(
handle: FileOrBuffer,
memory_map: bool,
encoding: str,
mode: str,
errors: str | None,
decode: bool,
) -> tuple[FileOrBuffer, bool, list[Buffer]]:
"""Try to memory map file/buffer."""
handles: list[Buffer] = []
memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
if not memory_map:
return handle, memory_map, handles
# need to open the file first
if isinstance(handle, str):
if encoding and "b" not in mode:
# Encoding
handle = open(handle, mode, encoding=encoding, errors=errors, newline="")
else:
# Binary mode
handle = open(handle, mode)
handles.append(handle)
try:
# error: Argument 1 to "_MMapWrapper" has incompatible type "Union[IO[Any],
# RawIOBase, BufferedIOBase, TextIOBase, mmap]"; expected "IO[Any]"
wrapped = cast(
mmap.mmap,
_MMapWrapper(handle, encoding, errors, decode), # type: ignore[arg-type]
)
handle.close()
handles.remove(handle)
handles.append(wrapped)
handle = wrapped
except Exception:
# we catch any errors that may have occurred
# because that is consistent with the lower-level
# functionality of the C engine (pd.read_csv), so
# leave the file handler as is then
memory_map = False
return handle, memory_map, handles
def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool:
"""Test whether file exists."""
exists = False
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
return exists
try:
exists = os.path.exists(filepath_or_buffer)
# gh-5874: if the filepath is too long will raise here
except (TypeError, ValueError):
pass
return exists
def _is_binary_mode(handle: FilePathOrBuffer, mode: str) -> bool:
"""Whether the handle is opened in binary mode"""
# specified by user
if "t" in mode or "b" in mode:
return "b" in mode
# classes that expect string but have 'b' in mode
text_classes = (codecs.StreamWriter, codecs.StreamReader, codecs.StreamReaderWriter)
if issubclass(type(handle), text_classes):
return False
# classes that expect bytes
binary_classes = (BufferedIOBase, RawIOBase)
return isinstance(handle, binary_classes) or "b" in getattr(handle, "mode", mode)
| bsd-3-clause |
Midafi/scikit-image | doc/examples/plot_holes_and_peaks.py | 3 | 2626 | """
===============================
Filling holes and finding peaks
===============================
In this example, we fill holes (i.e. isolated, dark spots) in an image using
morphological reconstruction by erosion. Erosion expands the minimal values of
the seed image until it encounters a mask image. Thus, the seed image and mask
image represent the maximum and minimum possible values of the reconstructed
image.
We start with an image containing both peaks and holes:
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.exposure import rescale_intensity
image = data.moon()
# Rescale image intensity so that we can see dim features.
image = rescale_intensity(image, in_range=(50, 200))
# convenience function for plotting images
def imshow(image, title, **kwargs):
fig, ax = plt.subplots(figsize=(5, 4))
ax.imshow(image, **kwargs)
ax.axis('off')
ax.set_title(title)
imshow(image, 'Original image')
"""
.. image:: PLOT2RST.current_figure
Now we need to create the seed image, where the minima represent the starting
points for erosion. To fill holes, we initialize the seed image to the maximum
value of the original image. Along the borders, however, we use the original
values of the image. These border pixels will be the starting points for the
erosion process. We then limit the erosion by setting the mask to the values
of the original image.
"""
import numpy as np
from skimage.morphology import reconstruction
seed = np.copy(image)
seed[1:-1, 1:-1] = image.max()
mask = image
filled = reconstruction(seed, mask, method='erosion')
imshow(filled, 'after filling holes', vmin=image.min(), vmax=image.max())
"""
.. image:: PLOT2RST.current_figure
As shown above, eroding inward from the edges removes holes, since (by
definition) holes are surrounded by pixels of brighter value. Finally, we can
isolate the dark regions by subtracting the reconstructed image from the
original image.
"""
imshow(image - filled, 'holes')
"""
.. image:: PLOT2RST.current_figure
Alternatively, we can find bright spots in an image using morphological
reconstruction by dilation. Dilation is the inverse of erosion and expands the
*maximal* values of the seed image until it encounters a mask image. Since this
is an inverse operation, we initialize the seed image to the minimum image
intensity instead of the maximum. The remainder of the process is the same.
"""
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
rec = reconstruction(seed, mask, method='dilation')
imshow(image - rec, 'peaks')
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| bsd-3-clause |
zfrenchee/pandas | pandas/compat/pickle_compat.py | 1 | 5787 | """
Support pre-0.12 series pickle compatibility.
"""
import sys
import pandas # noqa
import copy
import pickle as pkl
from pandas import compat, Index
from pandas.compat import u, string_types # noqa
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
if len(args) and type(args[0]) is type:
n = args[0].__name__ # noqa
try:
stack[-1] = func(*args)
return
except Exception as e:
# If we have a deprecated function,
# try to replace and try again.
msg = '_reconstruct: First argument must be a sub-type of ndarray'
if msg in str(e):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
return
except:
pass
# try to re-encode the arguments
if getattr(self, 'encoding', None) is not None:
args = tuple([arg.encode(self.encoding)
if isinstance(arg, string_types)
else arg for arg in args])
try:
stack[-1] = func(*args)
return
except:
pass
# unknown exception, re-raise
if getattr(self, 'is_verbose', None):
print(sys.exc_info())
print(func, args)
raise
# If classes are moved, provide compat here.
_class_locations_map = {
# 15477
('pandas.core.base', 'FrozenNDArray'):
('pandas.core.indexes.frozen', 'FrozenNDArray'),
('pandas.core.base', 'FrozenList'):
('pandas.core.indexes.frozen', 'FrozenList'),
# 10890
('pandas.core.series', 'TimeSeries'):
('pandas.core.series', 'Series'),
('pandas.sparse.series', 'SparseTimeSeries'):
('pandas.core.sparse.series', 'SparseSeries'),
# 12588, extensions moving
('pandas._sparse', 'BlockIndex'):
('pandas._libs.sparse', 'BlockIndex'),
('pandas.tslib', 'Timestamp'):
('pandas._libs.tslib', 'Timestamp'),
# 18543 moving period
('pandas._period', 'Period'): ('pandas._libs.tslibs.period', 'Period'),
('pandas._libs.period', 'Period'):
('pandas._libs.tslibs.period', 'Period'),
# 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
('pandas.tslib', '__nat_unpickle'):
('pandas._libs.tslibs.nattype', '__nat_unpickle'),
('pandas._libs.tslib', '__nat_unpickle'):
('pandas._libs.tslibs.nattype', '__nat_unpickle'),
# 15998 top-level dirs moving
('pandas.sparse.array', 'SparseArray'):
('pandas.core.sparse.array', 'SparseArray'),
('pandas.sparse.series', 'SparseSeries'):
('pandas.core.sparse.series', 'SparseSeries'),
('pandas.sparse.frame', 'SparseDataFrame'):
('pandas.core.sparse.frame', 'SparseDataFrame'),
('pandas.indexes.base', '_new_Index'):
('pandas.core.indexes.base', '_new_Index'),
('pandas.indexes.base', 'Index'):
('pandas.core.indexes.base', 'Index'),
('pandas.indexes.numeric', 'Int64Index'):
('pandas.core.indexes.numeric', 'Int64Index'),
('pandas.indexes.range', 'RangeIndex'):
('pandas.core.indexes.range', 'RangeIndex'),
('pandas.indexes.multi', 'MultiIndex'):
('pandas.core.indexes.multi', 'MultiIndex'),
('pandas.tseries.index', '_new_DatetimeIndex'):
('pandas.core.indexes.datetimes', '_new_DatetimeIndex'),
('pandas.tseries.index', 'DatetimeIndex'):
('pandas.core.indexes.datetimes', 'DatetimeIndex'),
('pandas.tseries.period', 'PeriodIndex'):
('pandas.core.indexes.period', 'PeriodIndex')
}
# our Unpickler sub-class to override methods and some dispatcher
# functions for compat
if compat.PY3:
class Unpickler(pkl._Unpickler):
def find_class(self, module, name):
# override superclass
key = (module, name)
module, name = _class_locations_map.get(key, key)
return super(Unpickler, self).find_class(module, name)
else:
class Unpickler(pkl.Unpickler):
def find_class(self, module, name):
# override superclass
key = (module, name)
module, name = _class_locations_map.get(key, key)
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
return klass
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except:
pass
def load(fh, encoding=None, compat=False, is_verbose=False):
"""load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh: a filelike object
encoding: an optional encoding
compat: provide Series compatibility mode, boolean, default False
is_verbose: show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except:
raise
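# --- Illustrative usage sketch (not part of the original module) ---
# Reading a legacy pickle through this loader; the file name and the encoding
# are hypothetical.
#
#   with open("old_frame.pkl", "rb") as fh:
#       obj = load(fh, encoding="latin-1")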
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 16 | 12486 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
However, note that this transformer will only do a binary one-hot encoding
when feature values are of type string. If categorical features are
represented as numeric values such as int, the DictVectorizer can be
followed by OneHotEncoder to complete binary one-hot encoding.
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator : string, optional
Separator string used when constructing new features for one-hot
coding.
sparse : boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort : boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = np.frombuffer(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
dweinstein/androguard | elsim/elsim/elsim.py | 37 | 16175 | # This file is part of Elsim
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import logging
ELSIM_VERSION = 0.2
log_elsim = logging.getLogger("elsim")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
log_elsim.addHandler(console_handler)
log_runtime = logging.getLogger("elsim.runtime") # logs at runtime
log_interactive = logging.getLogger("elsim.interactive") # logs in interactive functions
log_loading = logging.getLogger("elsim.loading") # logs when loading
def set_debug():
log_elsim.setLevel( logging.DEBUG )
def get_debug():
return log_elsim.getEffectiveLevel() == logging.DEBUG
def warning(x):
log_runtime.warning(x)
def error(x):
log_runtime.error(x)
    raise Exception(x)
def debug(x):
log_runtime.debug(x)
from similarity.similarity import *
FILTER_ELEMENT_METH = "FILTER_ELEMENT_METH"
FILTER_CHECKSUM_METH = "FILTER_CHECKSUM_METH" # function to checksum an element
FILTER_SIM_METH = "FILTER_SIM_METH" # function to calculate the similarity between two elements
FILTER_SORT_METH = "FILTER_SORT_METH" # function to sort all similar elements
FILTER_SORT_VALUE = "FILTER_SORT_VALUE" # value which used in the sort method to eliminate not interesting comparisons
FILTER_SKIPPED_METH = "FILTER_SKIPPED_METH" # object to skip elements
FILTER_SIM_VALUE_METH = "FILTER_SIM_VALUE_METH" # function to modify values of the similarity
BASE = "base"
ELEMENTS = "elements"
HASHSUM = "hashsum"
SIMILAR_ELEMENTS = "similar_elements"
HASHSUM_SIMILAR_ELEMENTS = "hash_similar_elements"
NEW_ELEMENTS = "newelements"
HASHSUM_NEW_ELEMENTS = "hash_new_elements"
DELETED_ELEMENTS = "deletedelements"
IDENTICAL_ELEMENTS = "identicalelements"
INTERNAL_IDENTICAL_ELEMENTS = "internal identical elements"
SKIPPED_ELEMENTS = "skippedelements"
SIMILARITY_ELEMENTS = "similarity_elements"
SIMILARITY_SORT_ELEMENTS = "similarity_sort_elements"
class ElsimNeighbors(object):
def __init__(self, x, ys):
import numpy as np
from sklearn.neighbors import NearestNeighbors
#print x, ys
CI = np.array( [x.checksum.get_signature_entropy(), x.checksum.get_entropy()] )
#print CI, x.get_info()
#print
for i in ys:
CI = np.vstack( (CI, [i.checksum.get_signature_entropy(), i.checksum.get_entropy()]) )
#idx = 0
#for i in np.array(CI)[1:]:
# print idx+1, i, ys[idx].get_info()
# idx += 1
self.neigh = NearestNeighbors(2, 0.4)
self.neigh.fit(np.array(CI))
#print self.neigh.kneighbors( CI[0], len(CI) )
self.CI = CI
self.ys = ys
def cmp_elements(self):
z = self.neigh.kneighbors( self.CI[0], 5 )
l = []
cmp_values = z[0][0]
cmp_elements = z[1][0]
idx = 1
for i in cmp_elements[1:]:
#if cmp_values[idx] > 1.0:
# break
#print i, cmp_values[idx], self.ys[ i - 1 ].get_info()
l.append( self.ys[ i - 1 ] )
idx += 1
return l
def split_elements(el, els):
e1 = {}
for i in els:
e1[ i ] = el.get_associated_element( i )
return e1
####
# elements : entropy raw, hash, signature
#
# set elements : hash
# hash table elements : hash --> element
class Elsim(object):
def __init__(self, e1, e2, F, T=None, C=None, libnative=True, libpath="elsim/elsim/similarity/libsimilarity/libsimilarity.so"):
self.e1 = e1
self.e2 = e2
self.F = F
self.compressor = SNAPPY_COMPRESS
set_debug()
if T != None:
self.F[ FILTER_SORT_VALUE ] = T
if isinstance(libnative, str):
libpath = libnative
libnative = True
self.sim = SIMILARITY( libpath, libnative )
if C != None:
if C in H_COMPRESSOR:
self.compressor = H_COMPRESSOR[ C ]
self.sim.set_compress_type( self.compressor )
else:
self.sim.set_compress_type( self.compressor )
self.filters = {}
self._init_filters()
self._init_index_elements()
self._init_similarity()
self._init_sort_elements()
self._init_new_elements()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ HASHSUM ] = {}
self.filters[ IDENTICAL_ELEMENTS ] = set()
self.filters[ SIMILAR_ELEMENTS ] = []
self.filters[ HASHSUM_SIMILAR_ELEMENTS ] = []
self.filters[ NEW_ELEMENTS ] = set()
self.filters[ HASHSUM_NEW_ELEMENTS ] = []
self.filters[ DELETED_ELEMENTS ] = []
self.filters[ SKIPPED_ELEMENTS ] = []
self.filters[ ELEMENTS ][ self.e1 ] = []
self.filters[ HASHSUM ][ self.e1 ] = []
self.filters[ ELEMENTS ][ self.e2 ] = []
self.filters[ HASHSUM ][ self.e2 ] = []
self.filters[ SIMILARITY_ELEMENTS ] = {}
self.filters[ SIMILARITY_SORT_ELEMENTS ] = {}
self.set_els = {}
self.ref_set_els = {}
self.ref_set_ident = {}
def _init_index_elements(self):
self.__init_index_elements( self.e1, 1 )
self.__init_index_elements( self.e2 )
def __init_index_elements(self, ce, init=0):
self.set_els[ ce ] = set()
self.ref_set_els[ ce ] = {}
self.ref_set_ident[ce] = {}
for ae in ce.get_elements():
e = self.filters[BASE][FILTER_ELEMENT_METH]( ae, ce )
if self.filters[BASE][FILTER_SKIPPED_METH].skip( e ):
self.filters[ SKIPPED_ELEMENTS ].append( e )
continue
self.filters[ ELEMENTS ][ ce ].append( e )
fm = self.filters[ BASE ][ FILTER_CHECKSUM_METH ]( e, self.sim )
e.set_checksum( fm )
sha256 = e.getsha256()
self.filters[ HASHSUM ][ ce ].append( sha256 )
if sha256 not in self.set_els[ ce ]:
self.set_els[ ce ].add( sha256 )
self.ref_set_els[ ce ][ sha256 ] = e
self.ref_set_ident[ce][sha256] = []
self.ref_set_ident[ce][sha256].append(e)
def _init_similarity(self):
intersection_elements = self.set_els[ self.e2 ].intersection( self.set_els[ self.e1 ] )
difference_elements = self.set_els[ self.e2 ].difference( intersection_elements )
self.filters[IDENTICAL_ELEMENTS].update([ self.ref_set_els[ self.e1 ][ i ] for i in intersection_elements ])
available_e2_elements = [ self.ref_set_els[ self.e2 ][ i ] for i in difference_elements ]
# Check if some elements in the first file has been modified
for j in self.filters[ELEMENTS][self.e1]:
self.filters[ SIMILARITY_ELEMENTS ][ j ] = {}
#debug("SIM FOR %s" % (j.get_info()))
if j.getsha256() not in self.filters[HASHSUM][self.e2]:
#eln = ElsimNeighbors( j, available_e2_elements )
#for k in eln.cmp_elements():
for k in available_e2_elements:
#debug("%s" % k.get_info())
self.filters[SIMILARITY_ELEMENTS][ j ][ k ] = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
if j.getsha256() not in self.filters[HASHSUM_SIMILAR_ELEMENTS]:
self.filters[SIMILAR_ELEMENTS].append(j)
self.filters[HASHSUM_SIMILAR_ELEMENTS].append( j.getsha256() )
def _init_sort_elements(self):
deleted_elements = []
for j in self.filters[SIMILAR_ELEMENTS]:
#debug("SORT FOR %s" % (j.get_info()))
sort_h = self.filters[BASE][FILTER_SORT_METH]( j, self.filters[SIMILARITY_ELEMENTS][ j ], self.filters[BASE][FILTER_SORT_VALUE] )
self.filters[SIMILARITY_SORT_ELEMENTS][ j ] = set( i[0] for i in sort_h )
ret = True
if sort_h == []:
ret = False
if ret == False:
deleted_elements.append( j )
for j in deleted_elements:
self.filters[ DELETED_ELEMENTS ].append( j )
self.filters[ SIMILAR_ELEMENTS ].remove( j )
def __checksort(self, x, y):
return y in self.filters[SIMILARITY_SORT_ELEMENTS][ x ]
def _init_new_elements(self):
# Check if some elements in the second file are totally new !
for j in self.filters[ELEMENTS][self.e2]:
# new elements can't be in similar elements
if j not in self.filters[SIMILAR_ELEMENTS]:
# new elements hashes can't be in first file
if j.getsha256() not in self.filters[HASHSUM][self.e1]:
ok = True
# new elements can't be compared to another one
for diff_element in self.filters[SIMILAR_ELEMENTS]:
if self.__checksort( diff_element, j ):
ok = False
break
if ok:
if j.getsha256() not in self.filters[HASHSUM_NEW_ELEMENTS]:
self.filters[NEW_ELEMENTS].add( j )
self.filters[HASHSUM_NEW_ELEMENTS].append( j.getsha256() )
def get_similar_elements(self):
""" Return the similar elements
@rtype : a list of elements
"""
return self.get_elem( SIMILAR_ELEMENTS )
def get_new_elements(self):
""" Return the new elements
@rtype : a list of elements
"""
return self.get_elem( NEW_ELEMENTS )
def get_deleted_elements(self):
""" Return the deleted elements
@rtype : a list of elements
"""
return self.get_elem( DELETED_ELEMENTS )
def get_internal_identical_elements(self, ce):
""" Return the internal identical elements
@rtype : a list of elements
"""
return self.get_elem( INTERNAL_IDENTICAL_ELEMENTS )
def get_identical_elements(self):
""" Return the identical elements
@rtype : a list of elements
"""
return self.get_elem( IDENTICAL_ELEMENTS )
def get_skipped_elements(self):
return self.get_elem( SKIPPED_ELEMENTS )
def get_elem(self, attr):
return [ x for x in self.filters[attr] ]
def show_element(self, i, details=True):
print "\t", i.get_info()
if details:
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
if len(self.ref_set_ident[self.e2][i.getsha256()]) > 1:
for ident in self.ref_set_ident[self.e2][i.getsha256()]:
print "\t\t-->", ident.get_info()
else:
print "\t\t-->", self.ref_set_els[self.e2][ i.getsha256() ].get_info()
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
print "\t\t-->", j.get_info(), self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ]
def get_element_info(self, i):
l = []
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
l.append( [ i, self.ref_set_els[self.e2][ i.getsha256() ] ] )
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
l.append( [i, j, self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ] ] )
return l
def get_associated_element(self, i):
return list(self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ])[0]
def get_similarity_value(self, new=True):
values = []
self.sim.set_compress_type( BZ2_COMPRESS )
for j in self.filters[SIMILAR_ELEMENTS]:
k = self.get_associated_element( j )
value = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
# filter value
value = self.filters[BASE][FILTER_SIM_VALUE_METH]( value )
values.append( value )
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 0.0 ) for i in self.filters[IDENTICAL_ELEMENTS] ] )
if new == True:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[NEW_ELEMENTS] ] )
else:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[DELETED_ELEMENTS] ] )
self.sim.set_compress_type( self.compressor )
similarity_value = 0.0
for i in values:
similarity_value += (1.0 - i)
if len(values) == 0:
return 0.0
return (similarity_value/len(values)) * 100
def show(self):
print "Elements:"
print "\t IDENTICAL:\t", len(self.get_identical_elements())
print "\t SIMILAR: \t", len(self.get_similar_elements())
print "\t NEW:\t\t", len(self.get_new_elements())
print "\t DELETED:\t", len(self.get_deleted_elements())
print "\t SKIPPED:\t", len(self.get_skipped_elements())
#self.sim.show()
ADDED_ELEMENTS = "added elements"
DELETED_ELEMENTS = "deleted elements"
LINK_ELEMENTS = "link elements"
DIFF = "diff"
class Eldiff(object):
def __init__(self, elsim, F):
self.elsim = elsim
self.F = F
self._init_filters()
self._init_diff()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ ADDED_ELEMENTS ] = {}
self.filters[ DELETED_ELEMENTS ] = {}
self.filters[ LINK_ELEMENTS ] = {}
def _init_diff(self):
for i, j in self.elsim.get_elements():
self.filters[ ADDED_ELEMENTS ][ j ] = []
self.filters[ DELETED_ELEMENTS ][ i ] = []
x = self.filters[ BASE ][ DIFF ]( i, j )
self.filters[ ADDED_ELEMENTS ][ j ].extend( x.get_added_elements() )
self.filters[ DELETED_ELEMENTS ][ i ].extend( x.get_deleted_elements() )
self.filters[ LINK_ELEMENTS ][ j ] = i
#self.filters[ LINK_ELEMENTS ][ i ] = j
def show(self):
        for bb in self.filters[ LINK_ELEMENTS ]:
print bb.get_info(), self.filters[ LINK_ELEMENTS ][ bb ].get_info()
print "Added Elements(%d)" % (len(self.filters[ ADDED_ELEMENTS ][ bb ]))
for i in self.filters[ ADDED_ELEMENTS ][ bb ]:
print "\t",
i.show()
print "Deleted Elements(%d)" % (len(self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]))
for i in self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]:
print "\t",
i.show()
print
def get_added_elements(self):
return self.filters[ ADDED_ELEMENTS ]
def get_deleted_elements(self):
return self.filters[ DELETED_ELEMENTS ]
| apache-2.0 |
sdss/marvin | python/marvin/utils/general/general.py | 1 | 64754 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2017-11-01
# @Filename: general.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego ([email protected])
# @Last modified time: 2019-08-29 15:58:00
from __future__ import absolute_import, division, print_function
import collections
import contextlib
import inspect
import os
import re
import sys
import warnings
from builtins import range
from collections import OrderedDict
from functools import wraps
from pkg_resources import parse_version
import matplotlib.pyplot as plt
import numpy as np
import PIL
from astropy import table, wcs
from astropy.units.quantity import Quantity
from brain.core.exceptions import BrainError
from flask_jwt_extended import get_jwt_identity
from scipy.interpolate import griddata
import marvin
from marvin import log
from marvin.core.exceptions import MarvinError, MarvinUserWarning
try:
from sdss_access import Access, AccessError
except ImportError:
Access = None
try:
from sdss_access.path import Path
except ImportError:
Path = None
try:
import pympler.summary
import pympler.muppy
import psutil
except ImportError:
pympler = None
psutil = None
# General utilities
__all__ = ('convertCoords', 'parseIdentifier', 'mangaid2plateifu', 'findClosestVector',
'getWCSFromPng', 'convertImgCoords', 'getSpaxelXY',
'downloadList', 'getSpaxel', 'get_drpall_row', 'getDefaultMapPath',
'getDapRedux', 'get_nsa_data', '_check_file_parameters',
'invalidArgs', 'missingArgs', 'getRequiredArgs', 'getKeywordArgs',
'isCallableWithArgs', 'map_bins_to_column', '_sort_dir', 'get_drpall_path',
'get_dapall_path', 'temp_setattr', 'map_dapall', 'turn_off_ion', 'memory_usage',
'validate_jwt', 'target_status', 'target_is_observed', 'target_is_mastar',
'get_plates', 'get_manga_image', 'check_versions', 'get_drpall_table',
'get_dapall_table', 'get_drpall_file', 'get_dapall_file')
drpTable = {}
dapTable = {}
def validate_jwt(f):
''' Decorator to validate a JWT and User '''
@wraps(f)
def wrapper(*args, **kwargs):
current_user = get_jwt_identity()
if not current_user:
raise MarvinError('Invalid user from API token!')
else:
marvin.config.access = 'collab'
return f(*args, **kwargs)
return wrapper
def getSpaxel(cube=True, maps=True, modelcube=True,
x=None, y=None, ra=None, dec=None, xyorig=None, **kwargs):
"""Returns the |spaxel| matching certain coordinates.
The coordinates of the spaxel to return can be input as ``x, y`` pixels
    relative to ``xyorig`` in the cube, or as ``ra, dec`` celestial
coordinates.
This function is intended to be called by
:func:`~marvin.tools.cube.Cube.getSpaxel` or
:func:`~marvin.tools.maps.Maps.getSpaxel`, and provides shared code for
both of them.
Parameters:
cube (:class:`~marvin.tools.cube.Cube` or None or bool)
A :class:`~marvin.tools.cube.Cube` object with the DRP cube
data from which the spaxel spectrum will be extracted. If None,
the |spaxel| object(s) returned won't contain spectral information.
maps (:class:`~marvin.tools.maps.Maps` or None or bool)
As ``cube`` but for the :class:`~marvin.tools.maps.Maps`
object representing the DAP maps entity. If None, the |spaxel|
will be returned without DAP information.
modelcube (:class:`~marvin.tools.modelcube.ModelCube` or None or bool)
As ``cube`` but for the :class:`~marvin.tools.modelcube.ModelCube`
object representing the DAP modelcube entity. If None, the |spaxel|
will be returned without model information.
x,y (int or array):
The spaxel coordinates relative to ``xyorig``. If ``x`` is an
            array of coordinates, the size of ``x`` must match that of
``y``.
ra,dec (float or array):
The coordinates of the spaxel to return. The closest spaxel to
those coordinates will be returned. If ``ra`` is an array of
            coordinates, the size of ``ra`` must match that of ``dec``.
xyorig ({'center', 'lower'}):
The reference point from which ``x`` and ``y`` are measured.
Valid values are ``'center'``, for the centre of the
spatial dimensions of the cube, or ``'lower'`` for the
lower-left corner. This keyword is ignored if ``ra`` and
``dec`` are defined. ``xyorig`` defaults to
``marvin.config.xyorig.``
kwargs (dict):
Arguments to be passed to `~marvin.tools.spaxel.SpaxelBase`.
Returns:
spaxels (list):
The |spaxel| objects for this cube/maps corresponding to the input
coordinates. The length of the list is equal to the number
of input coordinates.
.. |spaxel| replace:: :class:`~marvin.tools.spaxel.Spaxel`
"""
# TODO: for now let's put these imports here, but we should fix the
# circular imports soon.
import marvin.tools.cube
import marvin.tools.maps
import marvin.tools.modelcube
import marvin.tools.spaxel
# Checks that the cube and maps data are correct
assert cube or maps or modelcube, \
'Either cube, maps, or modelcube needs to be specified.'
assert isinstance(cube, (marvin.tools.cube.Cube, bool)), \
'cube is not an instance of Cube or a boolean'
assert isinstance(maps, (marvin.tools.maps.Maps, bool)), \
'maps is not an instance of Maps or a boolean'
assert isinstance(modelcube, (marvin.tools.modelcube.ModelCube, bool)), \
'modelcube is not an instance of ModelCube or a boolean'
# Checks that we have the correct set of inputs.
if x is not None or y is not None:
assert ra is None and dec is None, 'Either use (x, y) or (ra, dec)'
assert x is not None and y is not None, 'Specify both x and y'
inputMode = 'pix'
isScalar = np.isscalar(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
coords = np.array([x, y], np.float).T
elif ra is not None or dec is not None:
assert x is None and y is None, 'Either use (x, y) or (ra, dec)'
assert ra is not None and dec is not None, 'Specify both ra and dec'
inputMode = 'sky'
isScalar = np.isscalar(ra)
ra = np.atleast_1d(ra)
dec = np.atleast_1d(dec)
coords = np.array([ra, dec], np.float).T
else:
raise ValueError('You need to specify either (x, y) or (ra, dec)')
if not xyorig:
xyorig = marvin.config.xyorig
if isinstance(maps, marvin.tools.maps.Maps):
ww = maps.wcs if inputMode == 'sky' else None
cube_shape = maps._shape
elif isinstance(cube, marvin.tools.cube.Cube):
ww = cube.wcs if inputMode == 'sky' else None
cube_shape = cube._shape
elif isinstance(modelcube, marvin.tools.modelcube.ModelCube):
ww = modelcube.wcs if inputMode == 'sky' else None
cube_shape = modelcube._shape
iCube, jCube = zip(convertCoords(coords, wcs=ww, shape=cube_shape,
mode=inputMode, xyorig=xyorig).T)
_spaxels = []
for ii in range(len(iCube[0])):
_spaxels.append(
marvin.tools.spaxel.Spaxel(jCube[0][ii], iCube[0][ii],
cube=cube, maps=maps, modelcube=modelcube, **kwargs))
if len(_spaxels) == 1 and isScalar:
return _spaxels[0]
else:
return _spaxels
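# --- Illustrative usage sketch (not part of the original module) ---
# As the docstring notes, this helper is normally reached through
# Cube.getSpaxel / Maps.getSpaxel; the plateifu and coordinates below are
# hypothetical examples.
#
#   cube = marvin.tools.cube.Cube(plateifu='8485-1901')
#   spaxel = cube.getSpaxel(x=5, y=5, xyorig='center')   # single Spaxel
#   spaxels = cube.getSpaxel(ra=[232.54, 232.55], dec=[48.69, 48.70])  # list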
def convertCoords(coords, mode='sky', wcs=None, xyorig='center', shape=None):
"""Convert input coordinates to array indices.
Converts input positions in x, y or RA, Dec coordinates to array indices
(in Numpy style) or spaxel extraction. In case of pixel coordinates, the
origin of reference (either the center of the cube or the lower left
corner) can be specified via ``xyorig``.
If ``shape`` is defined (mandatory for ``mode='pix'``, optional for
``mode='sky'``) and one or more of the resulting indices are outside the
size of the input shape, an error is raised.
This functions is mostly intended for internal use.
Parameters:
coords (array):
The input coordinates, as an array of shape Nx2.
mode ({'sky', 'pix'}:
The type of input coordinates, either `'sky'` for celestial
coordinates (in the format defined in the WCS header information),
or `'pix'` for pixel coordinates.
wcs (None or ``astropy.wcs.WCS`` object):
If ``mode='sky'``, the WCS solution from which the cube coordinates
can be derived.
xyorig (str):
If ``mode='pix'``, the reference point from which the coordinates
are measured. Valid values are ``'center'``, for the centre of the
spatial dimensions of the cube, or ``'lower'`` for the lower-left
corner.
shape (None or array):
If ``mode='pix'``, the shape of the spatial dimensions of the cube,
so that the central position can be calculated.
Returns:
result (Numpy array):
An array with the same shape as ``coords``, containing the cube
index positions for the input coordinates, in Numpy style (i.e.,
the first element being the row and the second the column).
"""
coords = np.atleast_2d(coords)
assert coords.shape[1] == 2, 'coordinates must be an array Nx2'
if mode == 'sky':
assert wcs, 'if mode==sky, wcs must be defined.'
coordsSpec = np.ones((coords.shape[0], 3), np.float32)
coordsSpec[:, :-1] = coords
cubeCoords = wcs.wcs_world2pix(coordsSpec, 0)
cubeCoords = np.fliplr(np.array(np.round(cubeCoords[:, :-1]), np.int))
elif mode in ['pix', 'pixel']:
assert xyorig, 'if mode==pix, xyorig must be defined.'
x = coords[:, 0]
y = coords[:, 1]
assert shape is not None, 'if mode==pix, shape must be defined.'
shape = np.atleast_1d(shape)
if xyorig == 'center':
yMid, xMid = shape / 2.
xCube = np.round(xMid + x)
yCube = np.round(yMid + y)
elif xyorig == 'lower':
xCube = np.round(x)
yCube = np.round(y)
else:
raise ValueError('xyorig must be center or lower.')
cubeCoords = np.array([yCube, xCube], np.int).T
else:
raise ValueError('mode must be pix or sky.')
if shape is not None:
if ((cubeCoords < 0).any() or
(cubeCoords[:, 0] > (shape[0] - 1)).any() or
(cubeCoords[:, 1] > (shape[1] - 1)).any()):
raise MarvinError('some indices are out of limits.'
'``xyorig`` is currently set to "{0}". '
'Try setting ``xyorig`` to "{1}".'
.format(xyorig, 'center' if xyorig == 'lower' else 'lower'))
return cubeCoords
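# --- Illustrative usage sketch (not part of the original module) ---
# Converting pixel offsets measured from the cube centre into numpy-style
# (row, column) indices for a hypothetical 34x34 spaxel cube.
#
#   convertCoords([[0, 0], [5, -3]], mode='pix', xyorig='center', shape=(34, 34))
#   # -> array([[17, 17],
#   #           [14, 22]])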
def mangaid2plateifu(mangaid, mode='auto', drpall=None, drpver=None):
"""Return the plate-ifu for a certain mangaid.
Uses either the DB or the drpall file to determine the plate-ifu for
    a mangaid. If more than one plate-ifu is available for a given mangaid,
    the one with the highest SN2 (calculated as the sum of redSN2 and
    blueSN2) will be used, for both ``mode='drpall'`` and ``mode='db'``.
Parameters:
mangaid (str):
The mangaid for which the plate-ifu will be returned.
mode ({'auto', 'drpall', 'db', 'remote'}):
If `'drpall'` or ``'db'``, the drpall file or the local database,
respectively, will be used. If ``'remote'``, a request to the API
will be issued. If ``'auto'``, the local modes will be tried before
the remote mode.
drpall (str or None):
The path to the drpall file to use. If None, the file in
``config.drpall`` will be used.
drpver (str or None):
The DRP version to use. If None, the one in ``config.drpver`` will
be used. If ``drpall`` is defined, this value is ignored.
Returns:
plateifu (str):
The plate-ifu string for the input ``mangaid``.
"""
from marvin import config, marvindb
from marvin.api.api import Interaction
# The modes and order over which the auto mode will loop.
autoModes = ['db', 'drpall', 'remote']
assert mode in autoModes + ['auto'], 'mode={0} is not valid'.format(mode)
config_drpver, __ = config.lookUpVersions()
drpver = drpver if drpver else config_drpver
drpall = drpall if drpall else config._getDrpAllPath(drpver=drpver)
if mode == 'drpall':
# Get the drpall table from cache or fresh
drpall_table = get_drpall_table(drpver=drpver, drpall=drpall)
mangaids = np.array([mm.strip() for mm in drpall_table['mangaid']])
plateifus = drpall_table[np.where(mangaids == mangaid)]
if len(plateifus) > 1:
warnings.warn('more than one plate-ifu found for mangaid={0}. '
'Using the one with the highest SN2.'.format(mangaid),
MarvinUserWarning)
plateifus = plateifus[
[np.argmax(plateifus['bluesn2'] + plateifus['redsn2'])]]
if len(plateifus) == 0:
raise ValueError('no plate-ifus found for mangaid={0}'.format(mangaid))
return plateifus['plateifu'][0]
elif mode == 'db':
if not marvindb.isdbconnected:
raise MarvinError('no DB connection found')
if not drpver:
raise MarvinError('drpver not set.')
cubes = marvindb.session.query(marvindb.datadb.Cube).join(
marvindb.datadb.PipelineInfo, marvindb.datadb.PipelineVersion).filter(
marvindb.datadb.Cube.mangaid == mangaid,
marvindb.datadb.PipelineVersion.version == drpver).use_cache().all()
if len(cubes) == 0:
raise ValueError('no plate-ifus found for mangaid={0}'.format(mangaid))
elif len(cubes) > 1:
warnings.warn('more than one plate-ifu found for mangaid={0}. '
                          'Using the one with the highest SN2'.format(mangaid),
MarvinUserWarning)
total_sn2 = [float(cube.header['BLUESN2']) + float(cube.header['REDSN2'])
for cube in cubes]
cube = cubes[np.argmax(total_sn2)]
else:
cube = cubes[0]
return '{0}-{1}'.format(cube.plate, cube.ifu.name)
elif mode == 'remote':
try:
url = marvin.config.urlmap['api']['mangaid2plateifu']['url']
response = Interaction(url.format(mangaid=mangaid))
except MarvinError as e:
raise MarvinError('API call to mangaid2plateifu failed: {0}'.format(e))
else:
plateifu = response.getData(astype=str)
if not plateifu:
if 'error' in response.results and response.results['error']:
raise MarvinError(response.results['error'])
else:
raise MarvinError('API call to mangaid2plateifu failed with error unknown.')
return plateifu
elif mode == 'auto':
for mm in autoModes:
try:
plateifu = mangaid2plateifu(mangaid, mode=mm, drpver=drpver, drpall=drpall)
return plateifu
except:
continue
raise MarvinError(
'mangaid2plateifu was not able to find a plate-ifu for '
'mangaid={0} either local or remotely.'.format(mangaid))
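# --- Illustrative usage sketch (not part of the original module) ---
# Resolving a mangaid to its plate-ifu; the identifier shown is an example.
#
#   mangaid2plateifu('1-209232')                 # -> '8485-1901'
#   mangaid2plateifu('1-209232', mode='drpall')  # force use of the drpall file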
def findClosestVector(point, arr_shape=None, pixel_shape=None, xyorig=None):
"""Find the closest array coordinates from pixel coordinates.
Find the closest vector of array coordinates (x, y) from an input
vector of pixel coordinates (x, y).
Parameters:
point : tuple
Original point of interest in pixel units, order of (x,y)
arr_shape : tuple
Shape of data array in (x,y) order
pixel_shape : tuple
Shape of image in pixels in (x,y) order
xyorig : str
            Indicates the origin point of coordinates. Setting it to
            "relative" switches to an array coordinate system relative
to galaxy center. Default is absolute array coordinates
(x=0, y=0) = upper left corner
Returns:
minind : tuple
A tuple of array coordinates in x, y order
"""
# set as numpy arrays
arr_shape = np.array(arr_shape, dtype=int)
pixel_shape = np.array(pixel_shape, dtype=int)
# compute midpoints
xmid, ymid = arr_shape / 2
xpixmid, ypixmid = pixel_shape / 2
# default absolute array coordinates
xcoords = np.array([0, arr_shape[0]], dtype=int)
ycoords = np.array([0, arr_shape[1]], dtype=int)
# split x,y coords and pixel coords
x1, x2 = xcoords
y1, y2 = ycoords
xpix, ypix = pixel_shape
# build interpolates between array coordinates and pixel coordinates
points = [[x1, y1], [x1, y2], [xmid, ymid], [x2, y1], [x2, y2]]
values = [[0, ypix], [0, 0], [xpixmid, ypixmid], [xpix, ypix], [xpix, 0]] # full image
# values = [[xpixmid-xmid, ypixmid+ymid], [xpixmid-xmid, ypixmid-ymid], [xpixmid, ypixmid], [xpixmid+xmid, ypixmid+ymid], [xpixmid+xmid, ypixmid-ymid]] # pixels based on arr_shape
#values = [[xpixmid-x2, ypixmid+y2], [xpixmid-x2, ypixmid-y2], [xpixmid, ypixmid], [xpixmid+x2, ypixmid+y2], [xpixmid+x2, ypixmid-y2]] # pixels based on arr_shape
    # make 2d array of array indices in absolute or relative coordinates
arrinds = np.mgrid[x1:x2, y1:y2].swapaxes(0, 2).swapaxes(0, 1)
# interpolate a new 2d pixel coordinate array
final = griddata(points, values, arrinds)
# find minimum array vector closest to input coordinate point
diff = np.abs(point - final)
prod = diff[:, :, 0] * diff[:, :, 1]
minind = np.unravel_index(prod.argmin(), arr_shape)
# toggle relative array coordinates
if xyorig in ['relative', 'center']:
minind = np.array(minind, dtype=int)
xmin = minind[0] - xmid
ymin = ymid - minind[1]
minind = (xmin, ymin)
return minind
def getWCSFromPng(filename=None, image=None):
"""Extract any WCS info from the metadata of a PNG image.
Extracts the WCS metadata info from the PNG optical
image of the galaxy using PIL (Python Imaging Library).
Converts it to an Astropy WCS object.
Parameters:
image (object):
An existing PIL image object
filename (str):
The full path to the image
Returns:
pngwcs (WCS):
an Astropy WCS object
"""
assert any([image, filename]), 'Must provide either a PIL image object, or the full image filepath'
pngwcs = None
if filename and not image:
try:
image = PIL.Image.open(filename)
except Exception as e:
raise MarvinError('Cannot open image {0}: {1}'.format(filename, e))
else:
# Close the image
image.close()
# get metadata
meta = image.info if image else None
# parse the image metadata
mywcs = {}
if meta and 'WCSAXES' in meta.keys():
for key, val in meta.items():
try:
val = float(val)
except Exception as e:
pass
mywcs.update({key: val})
tmp = mywcs.pop('WCSAXES')
# Construct Astropy WCS
if mywcs:
pngwcs = wcs.WCS(mywcs)
return pngwcs
def convertImgCoords(coords, image, to_pix=None, to_radec=None):
"""Transform the WCS info in an image.
Convert image pixel coordinates to RA/Dec based on
    PNG image metadata or vice versa.
Parameters:
coords (tuple):
            The input coordinates to transform
image (str):
The full path to the image
to_pix (bool):
Set to convert to pixel coordinates
to_radec (bool):
Set to convert to RA/Dec coordinates
Returns:
newcoords (tuple):
Tuple of either (x, y) pixel coordinates
or (RA, Dec) coordinates
"""
try:
wcs = getWCSFromPng(image)
except Exception as e:
raise MarvinError('Cannot get wcs info from image {0}: {1}'.format(image, e))
if to_radec:
try:
newcoords = wcs.all_pix2world([coords], 1)[0]
except AttributeError as e:
raise MarvinError('Cannot convert coords to RA/Dec. No wcs! {0}'.format(e))
if to_pix:
try:
newcoords = wcs.all_world2pix([coords], 1)[0]
except AttributeError as e:
raise MarvinError('Cannot convert coords to image pixels. No wcs! {0}'.format(e))
return newcoords
def parseIdentifier(galid):
"""Determine if a string input is a plate, plateifu, or manga-id.
Parses a string object id and determines whether it is a
plate ID, a plate-IFU, or MaNGA-ID designation.
Parameters:
galid (str):
The string of an id name to parse
Returns:
idtype (str):
String indicating either plate, plateifu, mangaid, or None
"""
galid = str(galid)
hasdash = '-' in galid
if hasdash:
galidsplit = galid.split('-')
if int(galidsplit[0]) > 6500:
idtype = 'plateifu'
else:
idtype = 'mangaid'
else:
# check for plate
if galid.isdigit():
if len(galid) > 3:
idtype = 'plate'
else:
idtype = None
else:
idtype = None
return idtype
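# --- Illustrative usage sketch (not part of the original module) ---
# How the parser classifies the different identifier styles.
#
#   parseIdentifier('8485')        # -> 'plate'     (all digits, more than 3)
#   parseIdentifier('8485-1901')   # -> 'plateifu'  (first part > 6500)
#   parseIdentifier('1-209232')    # -> 'mangaid'   (first part <= 6500)
#   parseIdentifier('mango')       # -> None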
def getSpaxelXY(cube, plateifu, x, y):
"""Get a spaxel from a cube in the DB.
This function is mostly intended for internal use.
Parameters:
cube (SQLAlchemy object):
The SQLAlchemy object representing the cube from which to extract
the spaxel.
plateifu (str):
The corresponding plateifu of ``cube``.
x,y (int):
The coordinates of the spaxel in the database.
Returns:
spaxel (SQLAlchemy object):
The SQLAlchemy spaxel with coordinates ``(x, y)``.
"""
import sqlalchemy
mdb = marvin.marvindb
try:
spaxel = mdb.session.query(mdb.datadb.Spaxel).filter_by(cube=cube, x=x, y=y).use_cache().one()
except sqlalchemy.orm.exc.NoResultFound as e:
raise MarvinError('Could not retrieve spaxel for plate-ifu {0} at position {1},{2}: No Results Found: {3}'.format(plateifu, x, y, e))
except Exception as e:
raise MarvinError('Could not retrieve cube for plate-ifu {0} at position {1},{2}: Unknown exception: {3}'.format(plateifu, x, y, e))
return spaxel
def getDapRedux(release=None):
"""Retrieve SAS url link to the DAP redux directory.
Parameters:
release (str):
The release version of the data to download.
Defaults to Marvin config.release.
Returns:
dapredux (str):
The full redux path to the DAP MAPS
"""
if not Path:
raise MarvinError('sdss_access is not installed')
else:
# is_public = 'DR' in release
# path_release = release.lower() if is_public else None
sdss_path = Path(release=release)
release = release or marvin.config.release
drpver, dapver = marvin.config.lookUpVersions(release=release)
## hack a url version of MANGA_SPECTRO_ANALYSIS
#dapdefault = sdss_path.dir('mangadefault', drpver=drpver, dapver=dapver, plate=None, ifu=None)
#dappath = dapdefault.rsplit('/', 2)[0]
dappath = os.path.join(os.getenv("MANGA_SPECTRO_ANALYSIS"), drpver, dapver)
dapredux = sdss_path.url('', full=dappath)
return dapredux
def getDefaultMapPath(**kwargs):
"""Retrieve the default Maps path.
Uses sdss_access Path to generate a url download link to the
default MAPS file for a given MPL.
Parameters:
release (str):
The release version of the data to download.
Defaults to Marvin config.release.
plate (int):
The plate id
ifu (int):
The ifu number
mode (str):
The bintype of the default file to grab, i.e. MAPS or LOGCUBE. Defaults to MAPS
daptype (str):
The daptype of the default map to grab. Defaults to SPX-GAU-MILESHC
Returns:
maplink (str):
The sas url to download the default maps file
"""
# Get kwargs
release = kwargs.get('release', marvin.config.release)
drpver, dapver = marvin.config.lookUpVersions(release=release)
plate = kwargs.get('plate', None)
ifu = kwargs.get('ifu', None)
daptype = kwargs.get('daptype', 'SPX-GAU-MILESHC')
mode = kwargs.get('mode', 'MAPS')
assert mode in ['MAPS', 'LOGCUBE'], 'mode can either be MAPS or LOGCUBE'
# get sdss_access Path
if not Path:
raise MarvinError('sdss_access is not installed')
else:
# is_public = 'DR' in release
# path_release = release.lower() if is_public else None
sdss_path = Path(release=release)
# get the sdss_path name by MPL
# TODO: this is likely to break in future MPL/DRs. Just a heads up.
if '4' in release:
name = 'mangadefault'
else:
name = 'mangadap'
# construct the url link to default maps file
maplink = sdss_path.url(name, drpver=drpver, dapver=dapver, mpl=release,
plate=plate, ifu=ifu, daptype=daptype, mode=mode)
return maplink
def downloadList(inputlist, dltype='cube', **kwargs):
"""Download a list of MaNGA objects.
Uses sdss_access to download a list of objects
    via rsync. Places them in your local sas path mimicking
the Utah SAS.
i.e. $SAS_BASE_DIR/mangawork/manga/spectro/redux
Can download cubes, rss files, maps, modelcubes, mastar cubes,
png images, default maps, or the entire plate directory.
dltype=`dap` is a special keyword that downloads all DAP files. It sets binmode
and daptype to '*'
Parameters:
inputlist (list):
Required. A list of objects to download. Must be a list of plate IDs,
plate-IFUs, or manga-ids
        dltype ({'cube', 'maps', 'modelcube', 'dap', 'image', 'rss', 'mastar', 'default', 'plate'}):
            Indicates the type of object to download. Can be any of
plate, cube, image, mastar, rss, map, modelcube, or default (default map).
If not specified, the dltype defaults to cube.
release (str):
The MPL/DR version of the data to download.
Defaults to Marvin config.release.
bintype (str):
The bin type of the DAP maps to download. Defaults to *
binmode (str):
The bin mode of the DAP maps to download. Defaults to *
n (int):
The plan id number [1-12] of the DAP maps to download. Defaults to *
daptype (str):
The daptype of the default map to grab. Defaults to *
dir3d (str):
The directory where the images are located. Either 'stack' or 'mastar'. Defaults to *
verbose (bool):
Turns on verbosity during rsync
limit (int):
A limit to the number of items to download
test (bool):
If True, tests the download path construction but does not download
Returns:
If test=True, returns the list of full filepaths that will be downloaded
"""
assert isinstance(inputlist, (list, np.ndarray)), 'inputlist must be a list or numpy array'
# Get some possible keywords
# Necessary rsync variables:
# drpver, plate, ifu, dir3d, [mpl, dapver, bintype, n, mode]
verbose = kwargs.get('verbose', None)
as_url = kwargs.get('as_url', None)
release = kwargs.get('release', marvin.config.release)
drpver, dapver = marvin.config.lookUpVersions(release=release)
bintype = kwargs.get('bintype', '*')
binmode = kwargs.get('binmode', None)
daptype = kwargs.get('daptype', '*')
dir3d = kwargs.get('dir3d', '*')
n = kwargs.get('n', '*')
limit = kwargs.get('limit', None)
test = kwargs.get('test', None)
wave = 'LOG'
# check for sdss_access
if not Access:
raise MarvinError('sdss_access not installed.')
# Assert correct dltype
dltype = 'cube' if not dltype else dltype
assert dltype in ['plate', 'cube', 'mastar', 'modelcube', 'dap', 'rss', 'maps', 'image',
'default'], ('dltype must be one of plate, cube, mastar, '
'image, rss, maps, modelcube, dap, default')
assert binmode in [None, '*', 'MAPS', 'LOGCUBE'], 'binmode can only be *, MAPS or LOGCUBE'
# Assert correct dir3d
if dir3d != '*':
assert dir3d in ['stack', 'mastar'], 'dir3d must be either stack or mastar'
# Parse and retrieve the input type and the download type
idtype = parseIdentifier(inputlist[0])
if not idtype:
raise MarvinError('Input list must be a list of plates, plate-ifus, or mangaids')
# Set download type
if dltype == 'cube':
name = 'mangacube'
elif dltype == 'rss':
name = 'mangarss'
elif dltype == 'default':
name = 'mangadefault'
elif dltype == 'plate':
name = 'mangaplate'
elif dltype == 'maps':
# needs to change to include DR
if '4' in release:
name = 'mangamap'
else:
name = 'mangadap'
binmode = 'MAPS'
elif dltype == 'modelcube':
name = 'mangadap'
binmode = 'LOGCUBE'
elif dltype == 'dap':
name = 'mangadap'
binmode = '*'
daptype = '*'
elif dltype == 'mastar':
name = 'mangamastar'
elif dltype == 'image':
name = 'mangaimage'
# create rsync
rsync_access = Access(label='marvin_download', verbose=verbose, release=release)
rsync_access.remote()
# Add objects
for item in inputlist:
if idtype == 'mangaid':
try:
plateifu = mangaid2plateifu(item)
except MarvinError:
plateifu = None
else:
plateid, ifu = plateifu.split('-')
elif idtype == 'plateifu':
plateid, ifu = item.split('-')
elif idtype == 'plate':
plateid = item
ifu = '*'
rsync_access.add(name, plate=plateid, drpver=drpver, ifu=ifu, dapver=dapver, dir3d=dir3d,
mpl=release, bintype=bintype, n=n, mode=binmode, daptype=daptype,
wave=wave)
# set the stream
try:
rsync_access.set_stream()
except AccessError as e:
raise MarvinError('Error with sdss_access rsync.set_stream. AccessError: {0}'.format(e))
# get the list and download
listofitems = rsync_access.get_urls() if as_url else rsync_access.get_paths()
# print download location
item = listofitems[0] if listofitems else None
if item:
ver = dapver if dapver in item else drpver
dlpath = item[:item.rfind(ver) + len(ver)]
if verbose:
print('Target download directory: {0}'.format(dlpath))
if test:
return listofitems
else:
rsync_access.commit(limit=limit)
def _get_summary_file(name, summary_path=None, drpver=None, dapver=None):
''' Check for/download the drpall or dapall file
Checks for existence of a local summary file for the
current release set. If not found, uses sdss_access
to download it.
Parameters:
name (str):
The name of the summary file. Either drpall or dapall
summary_path (str):
The local path to either the drpall or dapall file
drpver (str):
The DRP version
dapver (str):
The DAP version
'''
assert name in ['drpall', 'dapall'], 'name must be either drpall or dapall'
from marvin import config
# # check for public release
# is_public = 'DR' in config.release
# release = config.release.lower() if is_public else None
# get drpver and dapver
config_drpver, config_dapver = config.lookUpVersions(config.release)
drpver = drpver if drpver else config_drpver
dapver = dapver if dapver else config_dapver
if name == 'drpall' and not summary_path:
summary_path = get_drpall_path(drpver)
elif name == 'dapall' and not summary_path:
summary_path = get_dapall_path(drpver, dapver)
if not os.path.isfile(summary_path):
warnings.warn('{0} file not found. Downloading it.'.format(name), MarvinUserWarning)
rsync = Access(label='get_summary_file', release=config.release)
rsync.remote()
rsync.add(name, drpver=drpver, dapver=dapver)
try:
rsync.set_stream()
except Exception as e:
raise MarvinError('Could not download the {4} file with sdss_access: '
'{0}\nTry manually downloading it for version ({1},{2}) and '
'placing it {3}'.format(e, drpver, dapver, summary_path, name))
else:
rsync.commit()
def get_drpall_file(drpver=None, drpall=None):
''' Check for/download the drpall file
Checks for existence of a local drpall file for the
current release set. If not found, uses sdss_access
to download it.
Parameters:
drpver (str):
The DRP version
drpall (str):
The local path to either the drpall file
'''
_get_summary_file('drpall', summary_path=drpall, drpver=drpver)
def get_dapall_file(drpver=None, dapver=None, dapall=None):
''' Check for/download the dapall file
Checks for existence of a local dapall file for the
current release set. If not found, uses sdss_access
to download it.
Parameters:
drpver (str):
The DRP version
dapver (str):
The DAP version
dapall (str):
The local path to either the dapall file
'''
_get_summary_file('dapall', summary_path=dapall, drpver=drpver, dapver=dapver)
def get_drpall_row(plateifu, drpver=None, drpall=None):
"""Returns a dictionary from drpall matching the plateifu."""
# get the drpall table
drpall_table = get_drpall_table(drpver=drpver, drpall=drpall, hdu='MANGA')
in_table = plateifu in drpall_table['plateifu']
# check the mastar extension
if not in_table:
drpall_table = get_drpall_table(drpver=drpver, drpall=drpall, hdu='MASTAR')
in_table = plateifu in drpall_table['plateifu']
if not in_table:
raise ValueError('No results found for {0} in drpall table'.format(plateifu))
row = drpall_table[drpall_table['plateifu'] == plateifu]
return row[0]
def _db_row_to_dict(row, remove_columns=False):
"""Converts a DB object to a dictionary."""
from sqlalchemy.inspection import inspect as sa_inspect
from sqlalchemy.ext.hybrid import hybrid_property
row_dict = collections.OrderedDict()
columns = row.__table__.columns.keys()
mapper = sa_inspect(row.__class__)
for key, item in mapper.all_orm_descriptors.items():
if isinstance(item, hybrid_property):
columns.append(key)
for col in columns:
if remove_columns and col in remove_columns:
continue
row_dict[col] = getattr(row, col)
return row_dict
def get_nsa_data(mangaid, source='nsa', mode='auto', drpver=None, drpall=None):
"""Returns a dictionary of NSA data from the DB or from the drpall file.
Parameters:
mangaid (str):
The mangaid of the target for which the NSA information will be returned.
source ({'nsa', 'drpall'}):
The data source. If ``source='nsa'``, the full NSA catalogue from the DB will
be used. If ``source='drpall'``, the subset of NSA columns included in the drpall
file will be returned.
mode ({'auto', 'local', 'remote'}):
See :ref:`mode-decision-tree`.
drpver (str or None):
The version of the DRP to use, if ``source='drpall'``. If ``None``, uses the
version set by ``marvin.config.release``.
drpall (str or None):
A path to the drpall file to use if ``source='drpall'``. If not defined, the
default drpall file matching ``drpver`` will be used.
Returns:
nsa_data (dict):
A dictionary containing the columns and values from the NSA catalogue for
``mangaid``.
"""
from marvin import config, marvindb
from .structs import DotableCaseInsensitive
valid_modes = ['auto', 'local', 'remote']
assert mode in valid_modes, 'mode must be one of {0}'.format(valid_modes)
valid_sources = ['nsa', 'drpall']
assert source in valid_sources, 'source must be one of {0}'.format(valid_sources)
log.debug('get_nsa_data: getting NSA data for mangaid=%r with source=%r, mode=%r',
mangaid, source, mode)
if mode == 'auto':
log.debug('get_nsa_data: running auto mode mode.')
try:
nsa_data = get_nsa_data(mangaid, mode='local', source=source,
drpver=drpver, drpall=drpall)
return nsa_data
except MarvinError as ee:
log.debug('get_nsa_data: local mode failed with error %s', str(ee))
try:
nsa_data = get_nsa_data(mangaid, mode='remote', source=source,
drpver=drpver, drpall=drpall)
return nsa_data
except MarvinError as ee:
raise MarvinError('get_nsa_data: failed to get NSA data for mangaid=%r in '
                                  'auto mode with error: %s', mangaid, str(ee))
elif mode == 'local':
if source == 'nsa':
if config.db is not None:
session = marvindb.session
sampledb = marvindb.sampledb
nsa_row = session.query(sampledb.NSA).join(sampledb.MangaTargetToNSA,
sampledb.MangaTarget).filter(
sampledb.MangaTarget.mangaid == mangaid).use_cache().all()
if len(nsa_row) == 1:
return DotableCaseInsensitive(
_db_row_to_dict(nsa_row[0], remove_columns=['pk', 'catalogue_pk']))
elif len(nsa_row) > 1:
warnings.warn('get_nsa_data: multiple NSA rows found for mangaid={0}. '
'Using the first one.'.format(mangaid), MarvinUserWarning)
return DotableCaseInsensitive(
_db_row_to_dict(nsa_row[0], remove_columns=['pk', 'catalogue_pk']))
elif len(nsa_row) == 0:
raise MarvinError('get_nsa_data: cannot find NSA row for mangaid={0}'
.format(mangaid))
else:
raise MarvinError('get_nsa_data: cannot find a valid DB connection.')
elif source == 'drpall':
plateifu = mangaid2plateifu(mangaid, drpver=drpver, drpall=drpall, mode='drpall')
log.debug('get_nsa_data: found plateifu=%r for mangaid=%r', plateifu, mangaid)
drpall_row = get_drpall_row(plateifu, drpall=drpall, drpver=drpver)
nsa_data = collections.OrderedDict()
for col in drpall_row.colnames:
if col.startswith('nsa_'):
value = drpall_row[col]
if isinstance(value, np.ndarray):
value = value.tolist()
else:
# In Astropy 2 the value would be an array of size 1
                        # but in Astropy 3 the value is already a scalar and asscalar fails.
try:
value = np.asscalar(value)
except AttributeError:
pass
nsa_data[col[4:]] = value
return DotableCaseInsensitive(nsa_data)
elif mode == 'remote':
from marvin.api.api import Interaction
try:
if source == 'nsa':
request_name = 'nsa_full'
else:
request_name = 'nsa_drpall'
url = marvin.config.urlmap['api'][request_name]['url']
response = Interaction(url.format(mangaid=mangaid))
except MarvinError as ee:
raise MarvinError('API call to {0} failed: {1}'.format(request_name, str(ee)))
else:
if response.results['status'] == 1:
return DotableCaseInsensitive(collections.OrderedDict(response.getData()))
else:
raise MarvinError('get_nsa_data: %s', response['error'])
def _check_file_parameters(obj1, obj2):
for param in ['plateifu', 'mangaid', 'plate', '_release', 'drpver', 'dapver']:
assert_msg = ('{0} is different between {1} {2}:\n {1}.{0}: {3} {2}.{0}:{4}'
.format(param, obj1.__repr__, obj2.__repr__, getattr(obj1, param),
getattr(obj2, param)))
assert getattr(obj1, param) == getattr(obj2, param), assert_msg
def add_doc(value):
"""Wrap method to programatically add docstring."""
def _doc(func):
func.__doc__ = value
return func
return _doc
def use_inspect(func):
''' Inspect a function of arguments and keywords.
Inspects a function or class method. Uses a different inspect for Python 2 vs 3
Only tested to work with args and defaults. varargs (variable arguments)
and varkw (keyword arguments) seem to always be empty.
Parameters:
func (func):
The function or method to inspect
Returns:
A tuple of arguments, variable arguments, keywords, and default values
'''
pyver = sys.version_info.major
if pyver == 2:
args, varargs, varkw, defaults = inspect.getargspec(func)
elif pyver == 3:
sig = inspect.signature(func)
args = []
defaults = []
varargs = varkw = None
for par in sig.parameters.values():
# most parameters seem to be of this kind
if par.kind == par.POSITIONAL_OR_KEYWORD:
args.append(par.name)
                # parameters whose default is not inspect._empty are optional; collect their defaults
if par.default != inspect._empty:
defaults.append(par.default)
return args, varargs, varkw, defaults
def getRequiredArgs(func):
''' Gets the required arguments from a function or method
Uses this difference between arguments and defaults to indicate
required versus optional arguments
Parameters:
func (func):
The function or method to inspect
Returns:
A list of required arguments
Example:
>>> import matplotlib.pyplot as plt
>>> getRequiredArgs(plt.scatter)
>>> ['x', 'y']
'''
args, varargs, varkw, defaults = use_inspect(func)
if defaults:
args = args[:-len(defaults)]
return args
def getKeywordArgs(func):
''' Gets the keyword arguments from a function or method
Parameters:
func (func):
The function or method to inspect
Returns:
A list of keyword arguments
Example:
>>> import matplotlib.pyplot as plt
>>> getKeywordArgs(plt.scatter)
>>> ['edgecolors', 'c', 'vmin', 'linewidths', 'marker', 's', 'cmap',
>>> 'verts', 'vmax', 'alpha', 'hold', 'data', 'norm']
'''
args, varargs, varkw, defaults = use_inspect(func)
req_args = getRequiredArgs(func)
opt_args = list(set(args) - set(req_args))
return opt_args
def missingArgs(func, argdict, arg_type='args'):
''' Return missing arguments from an input dictionary
Parameters:
func (func):
The function or method to inspect
argdict (dict):
The argument dictionary to test against
arg_type (str):
The type of arguments to test. Either (args|kwargs|req|opt). Default is required.
Returns:
A list of missing arguments
Example:
>>> import matplotlib.pyplot as plt
>>> testdict = {'edgecolors': 'black', 'c': 'r', 'xlim': 5, 'xlabel': 9, 'ylabel': 'y', 'ylim': 6}
>>> # test for missing required args
        >>> missingArgs(plt.scatter, testdict)
>>> {'x', 'y'}
>>> # test for missing optional args
>>> missingArgs(plt.scatter, testdict, arg_type='opt')
>>> ['vmin', 'linewidths', 'marker', 's', 'cmap', 'verts', 'vmax', 'alpha', 'hold', 'data', 'norm']
'''
assert arg_type in ['args', 'req', 'kwargs', 'opt'], 'arg_type must be one of (args|req|kwargs|opt)'
if arg_type in ['args', 'req']:
return set(getRequiredArgs(func)).difference(argdict)
elif arg_type in ['kwargs', 'opt']:
return set(getKeywordArgs(func)).difference(argdict)
def invalidArgs(func, argdict):
''' Return invalid arguments from an input dictionary
Parameters:
func (func):
The function or method to inspect
argdict (dict):
The argument dictionary to test against
Returns:
A list of invalid arguments
Example:
>>> import matplotlib.pyplot as plt
>>> testdict = {'edgecolors': 'black', 'c': 'r', 'xlim': 5, 'xlabel': 9, 'ylabel': 'y', 'ylim': 6}
>>> # test for invalid args
>>> invalidArgs(plt.scatter, testdict)
>>> {'xlabel', 'xlim', 'ylabel', 'ylim'}
'''
args, varargs, varkw, defaults = use_inspect(func)
return set(argdict) - set(args)
def isCallableWithArgs(func, argdict, arg_type='opt', strict=False):
''' Test if the function is callable with the an input dictionary
Parameters:
func (func):
The function or method to inspect
argdict (dict):
The argument dictionary to test against
arg_type (str):
            The type of arguments to test. Either (args|kwargs|req|opt). Defaults to 'opt'.
strict (bool):
If True, validates input dictionary against both missing and invalid keyword arguments. Default is False
Returns:
Boolean indicating whether the function is callable
Example:
>>> import matplotlib.pyplot as plt
>>> testdict = {'edgecolors': 'black', 'c': 'r', 'xlim': 5, 'xlabel': 9, 'ylabel': 'y', 'ylim': 6}
>>> # test for invalid args
>>> isCallableWithArgs(plt.scatter, testdict)
>>> False
'''
if strict:
return not missingArgs(func, argdict, arg_type=arg_type) and not invalidArgs(func, argdict)
else:
return not invalidArgs(func, argdict)
def map_bins_to_column(column, indices):
''' Maps a dictionary of array indices to column data
Takes a given data column and a dictionary of indices (see the indices key
from output of the histgram data in :meth:`marvin.utils.plot.scatter.hist`),
and produces a dictionary with the data values from column mapped in
individual bins.
Parameters:
column (list):
A column of data
indices (dict):
A dictionary of providing a list of array indices belonging to each
bin in a histogram.
Returns:
A dictionary containing, for each binid, a list of column data in that bin.
Example:
>>>
>>> # provide a list of data in each bin of an output histogram
>>> x = np.random.random(10)*10
>>> hdata = hist(x, bins=3, return_fig=False)
>>> inds = hdata['indices']
>>> pmap = map_bins_to_column(x, inds)
>>> OrderedDict([(1,
>>> [2.5092488009906235,
>>> 1.7494530589363955,
>>> 2.5070840461208754,
>>> 2.188355400587354,
>>> 2.6987990403658992,
>>> 1.6023553861428441]),
>>> (3, [7.9214280403215875, 7.488908995456573, 7.190598204420587]),
>>> (4, [8.533028236560906])])
'''
assert isinstance(indices, dict) is True, 'indices must be a dictionary of binids'
assert len(column) == sum(map(len, indices.values())), 'input column and indices values must have same len'
coldict = OrderedDict()
colarr = np.array(column)
for key, val in indices.items():
coldict[key] = colarr[val].tolist()
return coldict
def _sort_dir(instance, class_):
"""Sort `dir()` to return child class attributes and members first.
Return the attributes and members of the child class, so that
ipython tab completion lists those first.
Parameters:
instance: Instance of `class_` (usually self).
class_: Class of `instance`.
Returns:
list: Child class attributes and members.
"""
members_array = list(zip(*inspect.getmembers(np.ndarray)))[0]
members_quantity = list(zip(*inspect.getmembers(Quantity)))[0]
members_parents = members_array + members_quantity
return_list = [it[0] for it in inspect.getmembers(class_) if it[0] not in members_parents]
return_list += vars(instance).keys()
return_list += ['value']
return return_list
def _get_summary_path(name, drpver, dapver=None):
''' Return the path for either the DRP or DAP summary file
Parameters:
name (str):
The name of the summary file, either drpall or dapall
drpver (str):
The DRP version
dapver (str):
The DAP version
'''
assert name in ['drpall', 'dapall'], 'name must be either drpall or dapall'
release = marvin.config.lookUpRelease(drpver)
# is_public = 'DR' in release
# path_release = release.lower() if is_public else None
path = Path(release=release)
all_path = path.full(name, drpver=drpver, dapver=dapver)
return all_path
def get_drpall_path(drpver):
"""Returns the path to the DRPall file for ``(drpver, dapver)``."""
drpall_path = _get_summary_path('drpall', drpver=drpver)
return drpall_path
def get_dapall_path(drpver, dapver):
"""Returns the path to the DAPall file for ``(drpver, dapver)``."""
dapall_path = _get_summary_path('dapall', drpver, dapver)
return dapall_path
@contextlib.contextmanager
def turn_off_ion(show_plot=True):
''' Turns off the Matplotlib plt interactive mode
Context manager to temporarily disable the interactive
Matplotlib plotting functionality. Useful for only returning
Figure and Axes objects
Parameters:
show_plot (bool):
If True, turns off the plotting
Example:
>>>
>>> with turn_off_ion(show_plot=False):
>>> do_some_stuff
>>>
'''
plt_was_interactive = plt.isinteractive()
if not show_plot and plt_was_interactive:
plt.ioff()
fignum_init = plt.get_fignums()
yield plt
if show_plot:
plt.ioff()
plt.show()
else:
for ii in plt.get_fignums():
if ii not in fignum_init:
plt.close(ii)
# Restores original ion() status
if plt_was_interactive and not plt.isinteractive():
plt.ion()
@contextlib.contextmanager
def temp_setattr(ob, attrs, new_values):
""" Temporarily set attributed on an object
Temporarily set an attribute on an object for the duration of the
context manager.
Parameters:
ob (object):
A class instance to set attributes on
attrs (str|list):
A list of attribute names to replace
new_values (list):
A list of new values to set as new attribute. If new_values is
None, all attributes in attrs will be set to None.
Example:
>>> c = Cube(plateifu='8485-1901')
>>> print('before', c.mangaid)
>>> with temp_setattr(c, 'mangaid', None):
>>> # do stuff
>>> print('new', c.mangaid)
        >>> print('after', c.mangaid)
>>>
>>> # Output
>>> before '1-209232'
>>> new None
>>> after '1-209232'
>>>
"""
# set up intial inputs
attrs = attrs if isinstance(attrs, list) else [attrs]
if new_values:
new_values = new_values if isinstance(new_values, list) else [new_values]
else:
new_values = [new_values] * len(attrs)
assert len(attrs) == len(new_values), 'attrs and new_values must have the same length'
replaced = []
old_values = []
# grab the old values
for i, attr in enumerate(attrs):
new_value = new_values[i]
replace = False
old_value = None
if hasattr(ob, attr):
try:
if attr in ob.__dict__:
replace = True
except AttributeError:
if attr in ob.__slots__:
replace = True
if replace:
old_value = getattr(ob, attr)
replaced.append(replace)
old_values.append(old_value)
setattr(ob, attr, new_value)
# yield
yield replaced, old_values
# replace the old values
for i, attr in enumerate(attrs):
if not replaced[i]:
delattr(ob, attr)
else:
setattr(ob, attr, old_values[i])
def map_dapall(header, row):
''' Retrieves a dictionary of DAPall db column names
For a given row in the DAPall file, returns a dictionary
of corresponding DAPall database columns names with the
appropriate values.
Parameters:
header (Astropy header):
The primary header of the DAPall file
row (recarray):
A row of the DAPall binary table data
Returns:
A dictionary with db column names as keys and row data as values
Example:
>>> hdu = fits.open('dapall-v2_3_1-2.1.1.fits')
>>> header = hdu[0].header
>>> row = hdu[1].data[0]
>>> dbdict = map_dapall(header, row)
'''
# get names from header
emline_schannels = []
emline_gchannels = []
specindex_channels = []
for key, val in header.items():
if 'ELS' in key:
emline_schannels.append(val.lower().replace('-', '_').replace('.', '_'))
elif 'ELG' in key:
emline_gchannels.append(val.lower().replace('-', '_').replace('.', '_'))
elif re.search('SPI([0-9])', key):
specindex_channels.append(val.lower().replace('-', '_').replace('.', '_'))
# File column names
names = row.array.names
dbdict = {}
for col in names:
name = col.lower()
shape = row[col].shape if hasattr(row[col], 'shape') else ()
array = ''
values = row[col]
if len(shape) > 0:
channels = shape[0]
for i in range(channels):
channame = emline_schannels[i] if 'emline_s' in name else \
emline_gchannels[i] if 'emline_g' in name else \
specindex_channels[i] if 'specindex' in name else i + 1
colname = '{0}_{1}'.format(name, channame)
dbdict[colname] = values[i]
else:
dbdict[name] = values
return dbdict
def get_virtual_memory_usage_kb():
"""
The process's current virtual memory size in Kb, as a float.
Returns:
A float of the virtual memory usage
"""
assert psutil is not None, 'the psutil python package is required to run this function'
return float(psutil.Process().memory_info().vms) / 1024.0
def memory_usage(where):
"""
Print out a basic summary of memory usage.
Parameters:
where (str):
A string description of where in the code you are summarizing memory usage
"""
assert pympler is not None, 'the pympler python package is required to run this function'
mem_summary = pympler.summary.summarize(pympler.muppy.get_objects())
print("Memory summary: {0}".format(where))
pympler.summary.print_(mem_summary, limit=2)
print("VM: {0:.2f}Mb".format(get_virtual_memory_usage_kb() / 1024.0))
def target_status(mangaid, mode='auto', source='nsa', drpall=None, drpver=None):
''' Check the status of a MaNGA target
Given a mangaid, checks the status of a target. Will check if
target exists in the NSA catalog (i.e. is a proper target) and checks if
target has a corresponding plate-IFU designation (i.e. has been observed).
Returns a string status indicating if a target has been observed, has not
yet been observed, or is not a valid MaNGA target.
Parameters:
mangaid (str):
The mangaid of the target to check for observed status
mode ({'auto', 'drpall', 'db', 'remote', 'local'}):
See mode in :func:`mangaid2plateifu` and :func:`get_nsa_data`.
source ({'nsa', 'drpall'}):
The NSA catalog data source. See source in :func:`get_nsa_data`.
drpall (str or None):
The drpall file to use. See drpall in :func:`mangaid2plateifu` and :func:`get_nsa_data`.
drpver (str or None):
The DRP version to use. See drpver in :func:`mangaid2plateifu` and :func:`get_nsa_data`.
Returns:
A status of "observed", "not yet observed", or "not valid target"
'''
# check for plateifu - target has been observed
try:
plateifu = mangaid2plateifu(mangaid, mode=mode, drpver=drpver, drpall=drpall)
except (MarvinError, BrainError) as e:
plateifu = None
# check if target in NSA catalog - proper manga target
try:
nsa = get_nsa_data(mangaid, source=source, mode=mode, drpver=drpver, drpall=drpall)
except (MarvinError, BrainError) as e:
nsa = None
# return observed boolean
if plateifu and nsa:
status = 'observed'
elif not plateifu and nsa:
status = 'not yet observed'
elif not plateifu and not nsa:
status = 'not valid target'
return status
def target_is_observed(mangaid, mode='auto', source='nsa', drpall=None, drpver=None):
''' Check if a MaNGA target has been observed or not
See :func:`target_status` for full documentation.
Returns:
True if the target has been observed.
'''
# check the target status
status = target_status(mangaid, source=source, mode=mode, drpver=drpver, drpall=drpall)
return status == 'observed'
def target_is_mastar(plateifu, drpver=None, drpall=None):
''' Check if a target is bright-time MaStar target
Uses the local drpall file to check if a plateifu is a MaStar target
Parameters:
plateifu (str):
The plateifu of the target
drpver (str):
The drpver version to check against
drpall (str):
The drpall file path
Returns:
        True if the target's survey mode is 'APOGEE lead' (a bright-time MaStar target), False otherwise
'''
row = get_drpall_row(plateifu, drpver=drpver, drpall=drpall)
return row['srvymode'] == 'APOGEE lead'
def get_drpall_table(drpver=None, drpall=None, hdu='MANGA'):
''' Gets the drpall table
Gets the drpall table either from cache or loads it. For releases
of MPL-8 and up, galaxies are in the MANGA extension, and mastar
targets are in the MASTAR extension, specified with the hdu keyword. For
MPLs 1-7, there is only one data extension, which is read.
Parameters:
drpver (str):
The DRP release version to load. Defaults to current marvin release
drpall (str):
The full path to the drpall table. Defaults to current marvin release.
hdu (str):
The name of the HDU to read in. Default is 'MANGA'
Returns:
An Astropy Table
'''
from marvin import config
assert hdu.lower() in ['manga', 'mastar'], 'hdu can either be MANGA or MASTAR'
hdu = hdu.upper()
# get the drpall file
get_drpall_file(drpall=drpall, drpver=drpver)
# Loads the drpall table if it was not cached from a previous session.
config_drpver, __ = config.lookUpVersions()
drpver = drpver if drpver else config_drpver
# check for drpver
if drpver not in drpTable:
drpTable[drpver] = {}
# check for hdu
hduext = hdu if check_versions(drpver, 'v2_5_3') else 'MANGA'
if hdu not in drpTable[drpver]:
drpall = drpall if drpall else get_drpall_path(drpver=drpver)
data = {hduext: table.Table.read(drpall, hdu=hduext)}
drpTable[drpver].update(data)
drpall_table = drpTable[drpver][hduext]
return drpall_table
def get_dapall_table(drpver=None, dapver=None, dapall=None):
''' Gets the dapall table
Gets the dapall table either from cache or loads it. For releases
of MPL-6 and up.
Parameters:
drpver (str):
The DRP release version to load. Defaults to current marvin release
dapall (str):
The full path to the dapall table. Defaults to current marvin release.
Returns:
An Astropy Table
'''
from marvin import config
# get the dapall file
get_dapall_file(dapall=dapall, drpver=drpver, dapver=dapver)
# Loads the dapall table if it was not cached from a previous session.
config_drpver, config_dapver = config.lookUpVersions(config.release)
drpver = drpver if drpver else config_drpver
dapver = dapver if dapver else config_dapver
# check for dapver
if dapver not in dapTable:
dapall = dapall if dapall else get_dapall_path(drpver=drpver, dapver=dapver)
data = table.Table.read(dapall, hdu=1)
dapTable[dapver] = data
dapall_table = dapTable[dapver]
return dapall_table
def get_plates(drpver=None, drpall=None, release=None):
''' Get a list of unique plates from the drpall file
Parameters:
drpver (str):
The DRP release version to load. Defaults to current marvin release
drpall (str):
The full path to the drpall table. Defaults to current marvin release.
release (str):
The marvin release
Returns:
A list of plate ids
'''
assert not all([drpver, release]), 'Cannot set both drpver and release '
if release:
drpver, __ = marvin.config.lookUpVersions(release)
drpall_table = get_drpall_table(drpver=drpver, drpall=drpall)
plates = list(set(drpall_table['plate']))
return plates
def check_versions(version1, version2):
    ''' Compare two version ids against each other
Checks if version1 is greater than or equal to version2.
Parameters:
version1 (str):
The version to check
version2 (str):
The version to check against
Returns:
A boolean indicating if version1 is >= version2
'''
return parse_version(version1) >= parse_version(version2)
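# A minimal sketch of check_versions; the helper name is hypothetical. In Marvin
# it is typically called with DRP tags (e.g. 'v2_5_3'), but any strings that
# parse_version accepts behave the same way.
def _demo_check_versions():
    assert check_versions('2.1.1', '2.0.2') is True    # newer >= older
    assert check_versions('2.0.2', '2.1.1') is False
    assert check_versions('2.1.1', '2.1.1') is True    # equality counts as "at least"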
def get_manga_image(cube=None, drpver=None, plate=None, ifu=None, dir3d=None, local=None, public=None):
''' Get a MaNGA IFU optical PNG image
Parameters:
cube (Cube):
A Marvin Cube instance
drpver (str):
The drpver version
plate (str|int):
The plate id
ifu (str|int):
An IFU designation
dir3d (str):
The directory for 3d data, either 'stack' or 'mastar'
local (bool):
If True, return the local file path to the image
public (bool):
If True, use only DR releases
Returns:
A url to an IFU MaNGA image
'''
# check inputs
drpver = cube._drpver if cube else drpver
plate = cube.plate if cube else plate
ifu = cube.ifu if cube else ifu
dir3d = cube.dir3d if cube else dir3d
assert all([drpver, plate, ifu]), 'drpver, plate, and ifu must be specified'
# create the sdss Path
release = cube.release if cube else marvin.config.lookUpRelease(drpver, public_only=public)
path = Path(release=release)
dir3d = dir3d if dir3d else 'stack'
assert dir3d in ['stack', 'mastar'], 'dir3d can only be stack or mastar'
if local:
img = path.full('mangaimage', drpver=drpver, plate=plate, ifu=ifu, dir3d=dir3d)
else:
img = path.url('mangaimage', drpver=drpver, plate=plate, ifu=ifu, dir3d=dir3d)
return img
| bsd-3-clause |
cdawei/digbeta | dchen/music/src/models/PCMLC.py | 2 | 12894 | import sys
import time
import numpy as np
from sklearn.base import BaseEstimator
from scipy.sparse import issparse, isspmatrix_coo
from lbfgs import LBFGS, LBFGSError # pip install pylbfgs
from joblib import Parallel, delayed
VERBOSE = 1
N_JOBS = 3
def risk_pclassification(W, b, X, Y, P, Q, p=1):
"""
Empirical risk of p-classification loss for multilabel classification
Input:
- W: current weight matrix, K by D
- b: current bias
- X: feature matrix, N x D
        - Y: positive label matrix, N x K (boolean scipy.sparse COO matrix)
        - P: positive normalisation weights, shape (N, 1) to compute a loss per example
             (weighted by the number of positive labels per example), or (1, K) for a loss per label
        - Q: negative normalisation weights, same shape as P
        - p: constant for p-classification push loss
Output:
- risk: empirical risk
- db : gradient of bias term
- dW : gradients of weights
"""
assert p > 0
assert Y.dtype == np.bool
assert isspmatrix_coo(Y) # scipy.sparse.coo_matrix type
N, D = X.shape
K = Y.shape[1]
assert W.shape == (K, D)
# shape = (N, 1) if loss_type == 'example' else (1, K)
assert P.shape == Q.shape
if P.shape[0] == 1:
assert P.shape[1] == K
else:
assert P.shape == (N, 1)
T1 = np.dot(X, W.T) + b
T1p = np.zeros((N, K), dtype=np.float)
T1p[Y.row, Y.col] = T1[Y.row, Y.col]
T1n = T1 - T1p
T2 = np.exp(-T1p)
T2p = np.zeros((N, K), dtype=np.float)
T2p[Y.row, Y.col] = T2[Y.row, Y.col]
T2 = T2p * P
T3 = np.exp(p * T1n)
T3[Y.row, Y.col] = 0
T3 = T3 * Q
risk = np.sum(T2 + T3 / p)
T4 = T3 - T2
db = np.sum(T4)
dW = np.dot(T4.T, X)
if np.isnan(risk) or np.isinf(risk):
        sys.stderr.write('risk_pclassification(): risk is NaN or inf!\n')
sys.exit(0)
return risk, db, dW
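# A tiny numeric sanity check of risk_pclassification (hypothetical helper, not
# part of the original module). With W = 0 and b = 0 every score is zero, so each
# positive entry contributes P * exp(0) and each negative entry Q * exp(0) / p.
def _demo_risk_pclassification():
    from scipy.sparse import coo_matrix
    N, D, K = 4, 3, 2
    X = np.ones((N, D))
    Y = coo_matrix(np.array([[1, 0], [0, 1], [1, 0], [0, 1]], dtype=bool))
    W = np.zeros((K, D))
    P = np.ones((N, 1))  # per-example weights, i.e. the 'example' flavour of the loss
    Q = np.ones((N, 1))
    risk, db, dW = risk_pclassification(W, 0., X, Y, P, Q, p=1)
    # 4 positive and 4 negative entries, all with zero score: risk = 4 * 1 + 4 * 1 / 1
    assert np.isclose(risk, 8.) and dW.shape == (K, D)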
class DataHelper:
"""
SciPy sparse matrix slicing is slow, as stated here:
https://stackoverflow.com/questions/42127046/fast-slicing-and-multiplication-of-scipy-sparse-csr-matrix
Profiling confirms this inefficient slicing.
This iterator aims to do slicing only once and cache the results.
"""
def __init__(self, Y, ax=0, batch_size=256):
assert ax in [0, 1]
assert issparse(Y)
self.init = False
self.ax = ax
self.starts = []
self.ends = []
self.Ys = []
self.Ps = []
self.Qs = []
num = Y.shape[self.ax]
bs = num if batch_size > num else batch_size
self.n_batches = int((num-1) / bs) + 1
Y = Y.tocsr() if self.ax == 0 else Y.tocsc()
for nb in range(self.n_batches):
ix_start = nb * bs
ix_end = min((nb + 1) * bs, num)
Yi = Y[ix_start:ix_end, :] if self.ax == 0 else Y[:, ix_start:ix_end]
numPos = Yi.sum(axis=1-self.ax).A.reshape(-1)
numNeg = Yi.shape[1-self.ax] - numPos
nz_pix = np.nonzero(numPos)[0] # taking care of zeros
nz_nix = np.nonzero(numNeg)[0]
P = np.zeros_like(numPos, dtype=np.float)
Q = np.zeros_like(numNeg, dtype=np.float)
P[nz_pix] = 1. / numPos[nz_pix] # P = 1 / numPos
Q[nz_nix] = 1. / numNeg[nz_nix] # Q = 1 / numNeg
shape = (len(P), 1) if self.ax == 0 else (1, len(P))
self.starts.append(ix_start)
self.ends.append(ix_end)
self.Ys.append(Yi.tocoo())
self.Ps.append(P.reshape(shape))
self.Qs.append(Q.reshape(shape))
self.init = True
def get_data(self):
assert self.init is True
return self.starts, self.ends, self.Ys, self.Ps, self.Qs
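# A small sketch of how DataHelper is meant to be used (hypothetical helper):
# it slices the sparse label matrix once, per example (ax=0) or per label (ax=1),
# and caches the 1/numPos and 1/numNeg weights for every batch.
def _demo_data_helper():
    from scipy.sparse import csr_matrix
    Y = csr_matrix(np.array([[1, 0, 1], [0, 1, 0]], dtype=bool))
    helper = DataHelper(Y, ax=0, batch_size=1)          # one example per batch
    starts, ends, Ys, Ps, Qs = helper.get_data()
    assert helper.n_batches == 2
    # the first example has 2 positives and 1 negative, hence P = 1/2 and Q = 1
    assert np.isclose(Ps[0][0, 0], 0.5) and np.isclose(Qs[0][0, 0], 1.0)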
def accumulate_risk_label(Wt, bt, X, Y, p, data_helper):
assert data_helper is not None
assert data_helper.ax == 1
assert Wt.shape == (Y.shape[1], X.shape[1])
starts, ends, Ys, Ps, Qs = data_helper.get_data()
num = len(Ys)
results = Parallel(n_jobs=N_JOBS)(delayed(risk_pclassification)
(Wt[starts[i]:ends[i], :], bt, X, Ys[i], Ps[i], Qs[i], p=p) for i in range(num))
denom = Y.shape[1]
risk = 0.
db = 0.
dW_slices = []
for t in results:
risk += t[0] / denom
db += t[1] / denom
dW_slices.append(t[2] / denom)
dW = np.vstack(dW_slices)
return risk, db, dW
def accumulate_risk_example(Wt, bt, X, Y, p, data_helper):
assert data_helper is not None
assert data_helper.ax == 0
assert Wt.shape == (Y.shape[1], X.shape[1])
starts, ends, Ys, Ps, Qs = data_helper.get_data()
denom = Y.shape[0]
risk = 0.
db = 0.
dW = np.zeros_like(Wt)
num = len(Ys)
bs = 8
n_batches = int((num-1) / bs) + 1
indices = np.arange(num)
for nb in range(n_batches):
ixs = nb * bs
ixe = min((nb + 1) * bs, num)
ix = indices[ixs:ixe]
res = Parallel(n_jobs=N_JOBS)(delayed(risk_pclassification)
(Wt, bt, X[starts[i]:ends[i], :], Ys[i], Ps[i], Qs[i], p) for i in ix)
assert len(res) <= bs
for t in res:
assert len(t) == 3
risk += t[0] / denom
db += t[1] / denom
dW += t[2] / denom
return risk, db, dW
# def accumulate_risk(Wt, bt, X, Y, p, loss, data_helper, verbose=0):
# assert loss in ['example', 'label']
# assert data_helper is not None
# assert Wt.shape == (Y.shape[1], X.shape[1])
# ax = 0 if loss == 'example' else 1
# assert data_helper.ax == ax
# risk = 0.
# db = 0.
# dW = np.zeros_like(Wt)
# nb = 0
# for ix_start, ix_end, Yi, Pi, Qi in zip(*(data_helper.get_data())):
# nb += 1
# if verbose > 2:
# sys.stdout.write('\r%d / %d' % (nb, data_helper.n_batches))
# sys.stdout.flush()
# Xi = X[ix_start:ix_end, :] if ax == 0 else X
# Wb = Wt if ax == 0 else Wt[ix_start:ix_end, :]
# riski, dbi, dWi = risk_pclassification(Wb, bt, Xi, Yi, Pi, Qi, p=p, loss_type=loss)
# assert dWi.shape == Wb.shape
# denom = Y.shape[ax]
# risk += riski / denom
# db += dbi / denom
# if ax == 0:
# dW += dWi / denom
# else:
# dW[ix_start:ix_end, :] = dWi / denom
# if verbose > 2:
# print()
# return risk, db, dW
def multitask_regulariser(Wt, bt, cliques):
assert cliques is not None
denom = 0.
cost_mt = 0.
dW_mt = np.zeros_like(Wt)
for clq in cliques:
npl = len(clq)
if npl < 2:
continue
denom += npl * (npl - 1)
M = -1 * np.ones((npl, npl), dtype=np.float)
np.fill_diagonal(M, npl-1)
Wu = Wt[clq, :]
cost_mt += np.multiply(M, np.dot(Wu, Wu.T)).sum()
dW_mt[clq, :] = np.dot(M, Wu) # assume one playlist belongs to only one user
cost_mt /= denom
dW_mt = dW_mt * 2. / denom
return cost_mt, dW_mt
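# A worked sketch of the multitask regulariser (hypothetical helper). For a clique
# of npl playlists it builds M = npl * I - ones, so sum(M * (Wu Wu^T)) equals the
# sum of squared distances over all unordered pairs of clique weight vectors.
def _demo_multitask_regulariser():
    Wt = np.array([[1., 0.], [0., 1.], [1., 1.]])       # three playlists of one user
    cost, dW = multitask_regulariser(Wt, 0., [np.array([0, 1, 2])])
    # (|w0-w1|^2 + |w0-w2|^2 + |w1-w2|^2) / (npl * (npl - 1)) = (2 + 1 + 1) / 6
    assert np.isclose(cost, 4. / 6.) and dW.shape == Wt.shape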
def objective(w, dw, X, Y, C1=1, C2=1, C3=1, p=1, loss_type='example', cliques=None,
data_helper_example=None, data_helper_label=None, fnpy=None):
"""
- w : np.ndarray, current weights
- dw: np.ndarray, OUTPUT array for gradients of w
- cliques: a list of arrays, each array is the indices of playlists of the same user.
To require the parameters of label_i and label_j be similar by regularising
their diff if entry (i,j) is 1 (i.e. belong to the same user).
"""
assert loss_type in ['example', 'label', 'both']
assert C1 > 0
assert C2 > 0
assert C3 > 0
assert p > 0
t0 = time.time()
N, D = X.shape
K = Y.shape[1]
assert w.shape[0] == K * D + 1
b = w[0]
W = w[1:].reshape(K, D)
if loss_type == 'both':
risk1, db1, dW1 = accumulate_risk_label(W, b, X, Y, p, data_helper=data_helper_label)
risk2, db2, dW2 = accumulate_risk_example(W, b, X, Y, p, data_helper=data_helper_example)
risk = risk1 + C2 * risk2
db = db1 + C2 * db2
dW = dW1 + C2 * dW2
elif loss_type == 'label':
risk, db, dW = accumulate_risk_label(W, b, X, Y, p, data_helper=data_helper_label)
else:
risk, db, dW = accumulate_risk_example(W, b, X, Y, p, data_helper=data_helper_example)
J = risk + np.dot(W.ravel(), W.ravel()) * 0.5 / C1
dW += W / C1
if cliques is not None:
cost_mt, dW_mt = multitask_regulariser(W, b, cliques)
J += cost_mt / C3
dW += dW_mt / C3
dw[:] = np.r_[db, dW.ravel()] # in-place assignment
if VERBOSE > 0:
print('Eval f, g: %.1f seconds used.' % (time.time() - t0))
return J
def progress(x, g, f_x, xnorm, gnorm, step, k, ls, *args):
"""
Report optimization progress.
progress: callable(x, g, fx, xnorm, gnorm, step, k, num_eval, *args)
If not None, called at each iteration after the call to f with
the current values of x, g and f(x), the L2 norms of x and g,
the line search step, the iteration number,
the number of evaluations at this iteration and args.
"""
print('Iter {:3d}: f = {:15.9f}, |g| = {:15.9f}, {}'.format(k, f_x, gnorm, time.strftime('%Y-%m-%d %H:%M:%S')))
# save intermediate weights
fnpy = args[-1]
if fnpy is not None and k > 20 and k % 10 == 0:
try:
print(fnpy)
np.save(fnpy, x, allow_pickle=False)
except (OSError, IOError, ValueError):
sys.stderr.write('Save weights to .npy file failed\n')
class PCMLC(BaseEstimator):
"""All methods are necessary for a scikit-learn estimator"""
def __init__(self, C1=1, C2=1, C3=1, p=1, loss_type='example'):
"""Initialisation"""
assert C1 > 0
assert C2 > 0
assert C3 > 0
assert p > 0
assert loss_type in ['example', 'label', 'both'], \
'Valid assignment for "loss_type" are: "example", "label", "both".'
self.C1 = C1
self.C2 = C2
self.C3 = C3
self.p = p
self.loss_type = loss_type
self.trained = False
def fit(self, X_train, Y_train, user_playlist_indices=None, batch_size=256, verbose=0, w0=None, fnpy=None):
assert X_train.shape[0] == Y_train.shape[0]
N, D = X_train.shape
K = Y_train.shape[1]
        global VERBOSE
        VERBOSE = verbose  # set verbose output via the module-level variable
if VERBOSE > 0:
t0 = time.time()
if w0 is None:
if fnpy is not None:
try:
w0 = np.load(fnpy, allow_pickle=False)
assert w0.shape[0] == K * D + 1
print('Restore from %s' % fnpy)
except (IOError, ValueError):
w0 = np.zeros(K * D + 1)
else:
assert w0.shape[0] == K * D + 1
data_helper_example = None if self.loss_type == 'label' else DataHelper(Y_train, ax=0, batch_size=batch_size)
data_helper_label = None if self.loss_type == 'example' else DataHelper(Y_train, ax=1, batch_size=batch_size)
try:
# f: callable(x, g, *args)
# LBFGS().minimize(f, x0, progress=progress, args=args)
optim = LBFGS()
optim.linesearch = 'wolfe'
res = optim.minimize(objective, w0, progress,
args=(X_train, Y_train, self.C1, self.C2, self.C3, self.p, self.loss_type,
user_playlist_indices, data_helper_example, data_helper_label, fnpy))
self.b = res[0]
self.W = res[1:].reshape(K, D)
self.trained = True
except (LBFGSError, MemoryError) as err:
self.trained = False
sys.stderr.write('LBFGS failed: {0}\n'.format(err))
sys.stderr.flush()
if VERBOSE > 0:
print('Training finished in %.1f seconds' % (time.time() - t0))
def decision_function(self, X_test):
"""Make predictions (score is a real number)"""
assert self.trained is True, "Cannot make prediction before training"
return np.dot(X_test, self.W.T) + self.b # log of prediction score
def predict(self, X_test):
return self.decision_function(X_test)
# """Make predictions (score is boolean)"""
# preds = sigmoid(self.decision_function(X_test))
# return preds >= Threshold
# inherit from BaseEstimator instead of re-implement
# def get_params(self, deep = True):
# def set_params(self, **params):
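# Illustrative end-to-end sketch on synthetic data (not part of the original file).
# It assumes pylbfgs and joblib are installed and uses made-up sizes; guarded so it
# only runs when the module is executed directly.
if __name__ == '__main__':
    from scipy.sparse import csr_matrix
    rng = np.random.RandomState(0)
    X_toy = rng.randn(20, 5)
    Y_toy = csr_matrix(rng.rand(20, 3) > 0.5)           # boolean sparse label matrix
    clf = PCMLC(C1=1, C2=1, C3=1, p=2, loss_type='example')
    clf.fit(X_toy, Y_toy, batch_size=8, verbose=0)
    if clf.trained:
        print(clf.decision_function(X_toy).shape)       # expected: (20, 3)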
| gpl-3.0 |
xzh86/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
        learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
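# A minimal sketch (hypothetical helper, not part of scikit-learn): for squared
# error the negative gradient is simply the residual y - pred, which is why the
# terminal regions need no extra update.
def _demo_least_squares_gradient():
    loss = LeastSquaresError(1)
    y = np.array([3.0, -1.0, 2.0])
    pred = np.array([[2.5], [0.0], [2.0]])
    assert np.allclose(loss.negative_gradient(y, pred), [0.5, -1.0, 0.0])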
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
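# A minimal sketch (hypothetical helper, not part of scikit-learn): the Huber loss
# is quadratic for residuals within the alpha-quantile threshold and only linear
# beyond it, so a gross outlier is penalised far less than under squared error.
def _demo_huber_vs_squared():
    y = np.array([0.0, 0.0, 0.0, 0.0, 100.0])           # one gross outlier
    pred = np.zeros((5, 1))
    huber = HuberLossFunction(1, alpha=0.9)(y, pred)
    squared = LeastSquaresError(1)(y, pred)
    assert huber < squared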
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
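# Illustrative sketch (not part of the original module): the quantile loss
# gradient is asymmetric -- under-predictions are weighted by ``alpha`` and
# over-predictions by ``-(1 - alpha)`` -- which is what drives the fit toward
# the requested percentile. The helper name is hypothetical.
def _demo_quantile_negative_gradient():
    loss = QuantileLossFunction(1, alpha=0.9)
    y = np.array([1.0, 3.0])
    pred = np.array([[2.0], [2.0]])
    # over-prediction -> -0.1, under-prediction -> 0.9
    return loss.negative_gradient(y, pred)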
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, raises AttributeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
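# Illustrative sketch (not part of the original module): for binomial deviance
# the raw score is a log-odds, so probabilities are recovered with the
# logistic sigmoid (``expit``). The helper name is hypothetical.
def _demo_binomial_score_to_proba():
    loss = BinomialDeviance(2)
    score = np.array([[0.0], [2.0], [-2.0]])
    # column 1 is expit(score): 0.5, ~0.88, ~0.12; column 0 is the complement
    return loss._score_to_proba(score)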
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
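# Illustrative sketch (not part of the original module): multinomial scores
# are mapped to probabilities with a numerically stable softmax, so each row
# of the result sums to one. The helper name is hypothetical.
def _demo_multinomial_score_to_proba():
    loss = MultinomialDeviance(3)
    score = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
    proba = loss._score_to_proba(score)
    assert np.allclose(proba.sum(axis=1), 1.0)
    return proba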
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
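# Illustrative sketch (not part of the original module): with the exponential
# (AdaBoost) loss the predicted class is simply the sign of the score, while
# probabilities pass a doubled score through the sigmoid. The helper name is
# hypothetical.
def _demo_exponential_decision():
    loss = ExponentialLoss(2)
    score = np.array([[-1.5], [0.25]])
    # negative score -> class 0, positive score -> class 1
    return loss._score_to_decision(score)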
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
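# Illustrative sketch mirroring the usage example from the scikit-learn
# documentation (not part of the original module): fit a stumps-based
# classifier on the Hastie et al. toy problem and score it on held-out data.
def _demo_gradient_boosting_classifier():
    from sklearn.datasets import make_hastie_10_2
    X, y = make_hastie_10_2(random_state=0)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]
    clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
                                     max_depth=1, random_state=0)
    clf.fit(X_train, y_train)
    # the documentation reports an accuracy of about 0.913 for this setup
    return clf.score(X_test, y_test)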
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
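# Illustrative sketch mirroring the usage example from the scikit-learn
# documentation (not part of the original module): least-squares boosting on
# the Friedman #1 regression problem, evaluated with mean squared error.
def _demo_gradient_boosting_regressor():
    from sklearn.datasets import make_friedman1
    from sklearn.metrics import mean_squared_error
    X, y = make_friedman1(n_samples=1200, random_state=0, noise=1.0)
    X_train, X_test = X[:200], X[200:]
    y_train, y_test = y[:200], y[200:]
    est = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,
                                    max_depth=1, random_state=0, loss='ls')
    est.fit(X_train, y_train)
    # the documentation reports an MSE of about 5.0 for this setup
    return mean_squared_error(y_test, est.predict(X_test))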
| bsd-3-clause |
mespe/SolRad | collection/compare_cimis_cfsr/compare_levels.py | 1 | 2960 | import pandas as pd
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import netCDF4
def load_CFSR_data():
my_example_nc_file = 'RES.nc' # latitude, longitude = (39.5, -122)
fh = Dataset(my_example_nc_file, mode='r')
print(fh.variables.keys())
print(help(fh.variables['time']))
print(fh.variables['time'].name)
####time = fh['time'][:]
####print(time)
times = fh.variables['time']
time_np = netCDF4.num2date(times[:],times.units) -pd.offsets.Hour(8)
#print(time_np.shape)
variables = {"SHTFL_L1_Avg_1" : "Sensible heat flux",
"DSWRF_L1_Avg_1" : "Downward shortwave radiation flux",
"CSDSF_L1_Avg_1" : "Clear sky downward solar flux",
"DSWRF_L1_Avg_1" : "Downward shortwave radiation flux",
"DLWRF_L1_Avg_1" : "Downward longwave radiation flux",
"CSULF_L1_Avg_1" : "Clear sky upward longwave flux",
"GFLUX_L1_Avg_1" : "Ground heat flux"}
#downward_solar_flux_np = fh.variables["DSWRF_L1_Avg_1"][:,0,0] + fh.variables["DLWRF_L1_Avg_1"][:,0,0]- fh.variables["USWRF_L1_Avg_1"][:,0,0] - fh.variables["ULWRF_L1_Avg_1"][:,0,0]
downward_solar_flux_ground = fh.variables["CSDSF_L1_Avg_1"][:, 0, 0]
#(fh.variables["SHTFL_L1_Avg_1"][:,0,0] + fh.variables["LHTFL_L1_Avg_1"][:,0,0] +
#fh.variables["DSWRF_L1_Avg_1"][:,0,0] + fh.variables["DLWRF_L1_Avg_1"][:,0,0] -
#fh.variables["USWRF_L1_Avg_1"][:,0,0] - fh.variables["ULWRF_L1_Avg_1"][:,0,0] +
#fh.variables["GFLUX_L1_Avg_1"][:,0,0] )
downward_solar_flux_atm = fh.variables["DSWRF_L8_Avg_1"][:, 0, 0]
#print(downward_solar_flux_np.shape)
df = pd.DataFrame({'datetime': time_np, 'solar rad': downward_solar_flux_ground})
#plt.plot(df['time'][:100], df['solar'][:100])
# save to a pickle file
df.to_pickle('pes_ground_sf.pkl')
#'CSDSF_L1_Avg_1'
df = pd.DataFrame({'datetime': time_np, 'solar rad': downward_solar_flux_atm})
df.to_pickle('pes_atm_sf.pkl')
for key in fh.variables.keys():
variable = fh.variables[key]
#variable = fh.variables[key][:]
print(variable)
print()
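# Illustrative sketch (not part of the original script): once the pickles have
# been written, the hourly CFSR series can be resampled to daily means with
# pandas before plotting, which makes the ground/atmosphere levels easier to
# compare. The helper name and the 'D' resampling rule are assumptions.
def daily_mean_solar(pickle_path):
    df = pd.read_pickle(pickle_path)
    return df.set_index('datetime')['solar rad'].resample('D').mean()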
def compare():
ground = pd.read_pickle('pes_ground_sf.pkl')
atm = pd.read_pickle('pes_atm_sf.pkl')
    plt.plot(ground['datetime'][:1500], ground['solar rad'][:1500], label="ground", alpha=0.5)
    plt.plot(atm['datetime'][:1500], atm['solar rad'][:1500], label="atm", alpha=0.5)
plt.legend()
#for i in range(10):
# print(cfsr['datetime'][i], cimis['datetime'][i])
#print(cimis['datetime'][i])
#load_CFSR_data()
compare() | mit |
Tastalian/pymanoid | pymanoid/body.py | 3 | 24049 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 Stephane Caron <[email protected]>
#
# This file is part of pymanoid <https://github.com/stephane-caron/pymanoid>.
#
# pymanoid is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
import openravepy
from numpy import array, dot, eye, hstack, ndarray, vstack, zeros
from .misc import matplotlib_to_rgb, norm
from .sim import get_openrave_env
from .transformations import crossmat, rotation_matrix_from_rpy, rpy_from_quat
class Body(object):
"""
Base class for rigid bodies. Wraps OpenRAVE's KinBody type.
Parameters
----------
rave_body : openravepy.KinBody
OpenRAVE body to wrap.
pos : array, shape=(3,), optional
Initial position in inertial frame.
rpy : array, shape=(3,), optional
Initial orientation in inertial frame.
pose : array, shape=(7,), optional
Initial pose. Supersedes ``pos`` and ``rpy`` if they are provided at
the same time.
color : char, optional
Color code in matplotlib convention ('b' for blue, 'g' for green, ...).
visible : bool, optional
Visibility in the GUI.
"""
count = 0
def __init__(self, rave_body, pos=None, rpy=None, pose=None, color=None,
visible=True):
self.color = color
self.rave = rave_body
if not rave_body.GetName():
self.set_name("%s%s" % (type(self).__name__, Body.count))
Body.count += 1
if pos is not None:
self.set_pos(pos)
if rpy is not None:
self.set_rpy(rpy)
if pose is not None:
self.set_pose(pose)
if color is not None:
self.set_color(color)
if not visible:
self.hide()
def __str__(self):
return "pymanoid.Body('%s')" % self.name
def set_color(self, color):
"""
Set the color of the rigid body.
Parameters
----------
color : tuple or string
RGB tuple, or color code in matplotlib convention.
"""
if isinstance(color, str):
color = matplotlib_to_rgb(color)
for link in self.rave.GetLinks():
for geom in link.GetGeometries():
geom.SetAmbientColor(color)
geom.SetDiffuseColor(color)
self.color = color
def set_name(self, name):
"""
        Set body name in OpenRAVE scope.
        Parameters
        ----------
        name : string
            Body name.
"""
self.rave.SetName(name)
def set_transparency(self, transparency):
"""
Set the transparency of the rigid body.
Parameters
----------
transparency : double, optional
Transparency value from 0 (opaque) to 1 (invisible).
"""
for link in self.rave.GetLinks():
for geom in link.GetGeometries():
geom.SetTransparency(transparency)
def show(self):
"""Make the body visible."""
self.rave.SetVisible(True)
def hide(self):
"""Make the body invisible."""
self.rave.SetVisible(False)
@property
def index(self):
"""
OpenRAVE index of the body.
Notes
-----
This index is notably used to compute jacobians and hessians.
"""
return self.rave.GetIndex()
@property
def name(self):
"""Body name."""
return str(self.rave.GetName())
@property
def T(self):
"""
Homogeneous coordinates of the rigid body.
These coordinates describe the orientation and position of the rigid
body by the 4 x 4 transformation matrix
.. math::
T = \\left[
\\begin{array}{cc}
R & p \\\\
0_{1 \\times 3} & 1
\\end{array}
\\right]
where `R` is a `3 x 3` rotation matrix and `p` is the vector of
position coordinates.
Notes
-----
More precisely, `T` is the transformation matrix *from* the body frame
*to* the world frame: if
:math:`\\tilde{p}_\\mathrm{body} = [x\\ y\\ z\\ 1]` denotes the
homogeneous coordinates of a point in the body frame, then the
homogeneous coordinates of this point in the world frame are
:math:`\\tilde{p}_\\mathrm{world} = T \\tilde{p}_\\mathrm{body}`.
"""
return self.rave.GetTransform()
@property
def transform(self):
"""
Homogeneous coordinates of the rigid body.
These coordinates describe the orientation and position of the rigid
body by the 4 x 4 transformation matrix
.. math::
T = \\left[
\\begin{array}{cc}
R & p \\\\
0_{1 \\times 3} & 1
\\end{array}
\\right]
where `R` is a `3 x 3` rotation matrix and `p` is the vector of
position coordinates.
Notes
-----
More precisely, `T` is the transformation matrix *from* the body frame
*to* the world frame: if
:math:`\\tilde{p}_\\mathrm{body} = [x\\ y\\ z\\ 1]` denotes the
homogeneous coordinates of a point in the body frame, then the
homogeneous coordinates of this point in the world frame are
:math:`\\tilde{p}_\\mathrm{world} = T \\tilde{p}_\\mathrm{body}`.
"""
return self.T
@property
def pose(self):
"""
Body pose as a 7D quaternion + position vector.
The pose vector :math:`[q_w\\,q_x\\,q_y\\,q_z\\,x\\,y\\,z]` consists of
a quaternion :math:`q = [q_w\\,q_x\\,q_y\\,q_z]` (with the real term
:math:`q_w` coming first) for the body orientation, followed by the
coordinates :math:`p = [x\\,y\\,z]` in the world frame.
"""
pose = self.rave.GetTransformPose()
if pose[0] < 0: # convention: cos(alpha) > 0
# this convention enforces Slerp shortest path
pose[:4] *= -1
return pose
@property
def R(self):
"""Rotation matrix `R` from local to world coordinates."""
return self.T[0:3, 0:3]
@property
def rotation_matrix(self):
"""Rotation matrix `R` from local to world coordinates."""
return self.R
@property
def p(self):
"""Position coordinates `[x y z]` in the world frame."""
return self.T[0:3, 3]
@property
def pos(self):
"""Position coordinates `[x y z]` in the world frame."""
return self.p
@property
def x(self):
"""`x`-coordinate in the world frame."""
return self.p[0]
@property
def y(self):
"""`y`-coordinate in the world frame."""
return self.p[1]
@property
def z(self):
"""`z`-coordinate in the world frame."""
return self.p[2]
@property
def t(self):
"""Tangent vector directing the `x`-axis of the body frame."""
return self.T[0:3, 0]
@property
def b(self):
"""Binormal vector directing the `y`-axis of the body frame."""
return self.T[0:3, 1]
@property
def n(self):
"""Normal vector directing the `z`-axis of the body frame."""
return self.T[0:3, 2]
@property
def normal(self):
"""Normal vector directing the `z`-axis of the body frame."""
return self.T[0:3, 2]
@property
def quat(self):
"""Quaternion of the rigid body orientation."""
return self.pose[0:4]
@property
def rpy(self):
"""
Roll-pitch-yaw angles.
        They correspond to Euler angles for the sequence (1, 2, 3). See
[Diebel06]_ for details.
"""
return rpy_from_quat(self.quat)
@property
def roll(self):
"""Roll angle of the body orientation."""
return self.rpy[0]
@property
def pitch(self):
"""Pitch angle of the body orientation."""
return self.rpy[1]
@property
def yaw(self):
"""Yaw angle of the body orientation."""
return self.rpy[2]
def set_transform(self, T):
"""
Set homogeneous coordinates of the rigid body.
Parameters
----------
T : array, shape=(4, 4)
Transform matrix.
"""
self.rave.SetTransform(T)
def set_pos(self, pos):
"""
Set the position of the body in the world frame.
Parameters
----------
pos : array, shape=(3,)
3D vector of position coordinates.
"""
T = self.T.copy()
T[:3, 3] = pos
self.set_transform(T)
def set_rotation_matrix(self, R):
"""
Set the orientation of the rigid body.
Recall that this orientation is described by the rotation matrix `R`
*from* the body frame *to* the world frame.
Parameters
----------
R : (3, 3) array
Rotation matrix.
"""
T = self.T.copy()
T[:3, :3] = R
self.set_transform(T)
def set_x(self, x):
"""
Set the `x`-coordinate of the body in the world frame.
Parameters
----------
x : scalar
New `x`-coordinate.
"""
T = self.T.copy()
T[0, 3] = x
self.set_transform(T)
def set_y(self, y):
"""
Set the `y`-coordinate of the body in the world frame.
Parameters
----------
y : scalar
New `y`-coordinate.
"""
T = self.T.copy()
T[1, 3] = y
self.set_transform(T)
def set_z(self, z):
"""
Set the `z`-coordinate of the body in the world frame.
Parameters
----------
z : scalar
New `z`-coordinate.
"""
T = self.T.copy()
T[2, 3] = z
self.set_transform(T)
def set_rpy(self, rpy):
"""
Set the roll-pitch-yaw angles of the body orientation.
Parameters
----------
rpy : scalar triplet
Triplet `(r, p, y)` of roll-pitch-yaw angles.
"""
T = self.T.copy()
T[0:3, 0:3] = rotation_matrix_from_rpy(rpy)
self.set_transform(T)
def set_roll(self, roll):
"""
Set the roll angle of the body orientation.
Parameters
----------
roll : scalar
Roll angle in [rad].
"""
return self.set_rpy([roll, self.pitch, self.yaw])
def set_pitch(self, pitch):
"""
Set the pitch angle of the body orientation.
Parameters
----------
pitch : scalar
Pitch angle in [rad].
"""
return self.set_rpy([self.roll, pitch, self.yaw])
def set_yaw(self, yaw):
"""
Set the yaw angle of the body orientation.
Parameters
----------
yaw : scalar
Yaw angle in [rad].
"""
return self.set_rpy([self.roll, self.pitch, yaw])
def set_pose(self, pose):
"""
Set the 7D pose of the body orientation.
Parameters
----------
pose : (7,) array
Pose of the body, i.e. quaternion + position in world frame.
"""
T = openravepy.matrixFromPose(pose)
self.set_transform(T)
def set_quat(self, quat):
"""
Set the quaternion of the body orientation.
Parameters
----------
quat : (4,) array
Quaternion in (w, x, y, z) format.
"""
pose = self.pose.copy()
pose[0:4] = quat
self.set_pose(pose)
def translate(self, translation):
"""
Apply a translation to the body.
Parameters
----------
translation : (3,) array
Offset to apply to the position (world coordinates) of the body.
"""
self.set_pos(self.p + translation)
def remove(self):
"""
Remove body from OpenRAVE environment.
"""
env = get_openrave_env()
with env:
env.Remove(self.rave)
def __del__(self):
"""
Add body removal to garbage collection step (effective).
"""
try:
self.remove()
except Exception: # __del__ exceptions are ignored
pass
def apply_twist(self, v, omega, dt):
"""
Apply a twist :math:`[v\\ \\omega]` defined in the local coordinate
frame.
Parameters
----------
v : (3,) array
Linear velocity in local frame.
omega : (3,) array
Angular velocity in local frame.
dt : scalar
Duration of twist application in [s].
"""
self.set_pos(self.p + v * dt)
self.set_rotation_matrix(self.R + dot(crossmat(omega), self.R) * dt)
def dist(self, point):
"""
Distance from the body frame origin to another point.
Parameters
----------
point : array or Point
Point to compute the distance to.
"""
if isinstance(point, list):
point = array(point)
if isinstance(point, ndarray):
return norm(point - self.p)
return norm(point.p - self.p)
@property
def adjoint_matrix(self):
"""
Adjoint matrix converting wrenches in the local frame :math:`\\cal L`
to the inertial frame :math:`\\cal W`, that is the matrix
:math:`{}^{\\cal W}A_{\\cal L}` such that:
.. math::
{}^{\\cal W}w = {}^{\\cal W}A_{\\cal L} {}^{\\cal L} w
Returns
-------
A : array
Adjoint matrix.
"""
return vstack([
hstack([self.R, eye(3)]),
hstack([dot(crossmat(self.p), self.R), self.R])])
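# Illustrative sketch (not part of the original module): the same adjoint can
# be assembled from any rotation matrix R and position p without instantiating
# a Body, e.g. when converting wrench coordinates in unit tests that do not
# need an OpenRAVE environment. The helper name is hypothetical.
def _adjoint_from_pose(R, p):
    return vstack([
        hstack([R, eye(3)]),
        hstack([dot(crossmat(p), R), R])])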
class Manipulator(Body):
"""
Manipulators are special bodies with an end-effector property.
Parameters
----------
manipulator : openravepy.KinBody
OpenRAVE manipulator object.
pos : array, shape=(3,), optional
Initial position in inertial frame.
rpy : array, shape=(3,), optional
Initial orientation in inertial frame.
pose : array, shape=(7,), optional
Initial pose. Supersedes ``pos`` and ``rpy`` if they are provided at
the same time.
color : char, optional
Color code in matplotlib convention ('r' for red, 'b' for blue, etc.).
visible : bool, optional
Visibility in the GUI.
shape : (scalar, scalar), optional
Dimensions (half-length, half-width) of a contact patch in [m].
friction : scalar, optional
Static friction coefficient for potential contacts.
"""
def __init__(self, manipulator, pos=None, rpy=None, pose=None,
color=None, visible=True, shape=None, friction=None):
super(Manipulator, self).__init__(
manipulator, pos=pos, rpy=rpy, pose=pose, color=color,
visible=visible)
self.end_effector = manipulator.GetEndEffector()
self.friction = friction
self.shape = shape
self.wrench = None
def get_contact(self, pos=None, shape=None):
"""
Get contact located at the current manipulator pose.
Parameters
----------
pos : (3,) array, optional
Override manipulator position with this one.
shape : (scalar, scalar), optional
Dimensions (half-length, half-width) of contact patch in [m].
Returns
-------
contact : Contact
Contact located at manipulator pose.
"""
        from .contact import Contact
pose = self.pose.copy()
if pos is not None:
pose[4:] = pos
shape = self.shape if shape is None else shape
if shape is None:
raise Exception("Please provide a shape for the contact area")
return Contact(
shape, pose=pose, friction=self.friction, link=self)
@property
def force(self):
"""
Resultant of contact forces applied on the effector (if defined).
Coordinates are given in the end-effector frame.
"""
if self.wrench is None:
return None
return self.wrench[0:3]
@property
def index(self):
"""
Index used in Jacobian and Hessian computations.
"""
return self.end_effector.GetIndex()
@property
def moment(self):
"""
Moment of contact forces applied on the effector (if defined).
Coordinates are given in the end-effector frame.
"""
if self.wrench is None:
return None
return self.wrench[3:6]
class Box(Body):
"""
Rectangular box.
Parameters
----------
X : scalar
Box half-length in [m].
Y : scalar
Box half-width in [m].
Z : scalar
Box half-height in [m].
pos : array, shape=(3,)
Initial position in the world frame.
rpy : array, shape=(3,)
Initial orientation in the world frame.
pose : array, shape=(7,)
Initial pose in the world frame.
color : char
Color letter in ['r', 'g', 'b'].
visible : bool, optional
Visibility in the GUI.
dZ : scalar, optional
Shift in box normal coordinates used to make Contact slabs.
"""
def __init__(self, X, Y, Z, pos=None, rpy=None, pose=None, color='r',
visible=True, dZ=0.):
aabb = [0., 0., dZ, X, Y, Z]
env = get_openrave_env()
with env:
box = openravepy.RaveCreateKinBody(env, '')
box.InitFromBoxes(array([array(aabb)]), True)
super(Box, self).__init__(
box, pos=pos, rpy=rpy, pose=pose, color=color, visible=visible)
env.Add(box, True)
class Cube(Box):
"""
Cube.
Parameters
----------
size : scalar
Half-length of a side of the cube in [m].
pos : array, shape=(3,)
Initial position in the world frame.
rpy : array, shape=(3,)
Initial orientation in the world frame.
pose : array, shape=(7,)
Initial pose in the world frame.
color : char
Color letter in ['r', 'g', 'b'].
visible : bool, optional
Visibility in the GUI.
"""
def __init__(self, size, pos=None, rpy=None, pose=None, color='r',
visible=True):
super(Cube, self).__init__(
size, size, size, pos=pos, rpy=rpy, pose=pose, color=color,
visible=visible)
class Point(Cube):
"""
Points represented by cubes with a default size.
Parameters
----------
pos : array, shape=(3,)
Initial position in the world frame.
vel : array, shape=(3,), optional
Initial velocity in the world frame.
accel : array, shape=(3,), optional
Initial acceleration in the world frame.
size : scalar, optional
Half-length of a side of the cube in [m].
color : char
Color letter in ['r', 'g', 'b'].
visible : bool, optional
Visibility in the GUI.
"""
def __init__(self, pos, vel=None, accel=None, size=0.01, color='r',
visible=True):
super(Point, self).__init__(
size, pos=pos, color=color, visible=visible)
self.__pd = zeros(3) if vel is None else array(vel)
self.__pdd = zeros(3) if accel is None else array(accel)
def copy(self, color='r', visible=True):
"""
Copy constructor.
Parameters
----------
color : char, optional
Color of the copy, in ['r', 'g', 'b'].
visible : bool, optional
Should the copy be visible?
"""
return Point(self.p, self.pd, color=color, visible=visible)
@property
def pd(self):
"""Point velocity."""
return self.__pd.copy()
@property
def xd(self):
"""Point velocity along x-axis."""
return self.__pd[0]
@property
def yd(self):
"""Point velocity along y-axis."""
return self.__pd[1]
@property
def zd(self):
"""Point velocity along z-axis."""
return self.__pd[2]
def set_vel(self, pd):
"""
Update the point velocity.
Parameters
----------
pd : array, shape=(3,)
Velocity coordinates in the world frame.
"""
self.__pd = array(pd)
@property
def pdd(self):
"""Point acceleration."""
return self.__pdd.copy()
@property
def xdd(self):
"""Point acceleration along x-axis."""
return self.__pdd[0]
@property
def ydd(self):
"""Point acceleration along y-axis."""
return self.__pdd[1]
@property
def zdd(self):
"""Point acceleration along z-axis."""
return self.__pdd[2]
def set_accel(self, pdd):
"""
Update the point acceleration.
Parameters
----------
pdd : array, shape=(3,)
Acceleration coordinates in the world frame.
"""
self.__pdd = array(pdd)
def integrate_constant_accel(self, pdd, dt):
"""
Apply Euler integration for a constant acceleration.
Parameters
----------
pdd : array, shape=(3,)
Point acceleration in the world frame.
dt : scalar
Duration in [s].
"""
self.set_pos(self.p + (self.pd + .5 * pdd * dt) * dt)
self.set_vel(self.pd + pdd * dt)
self.set_accel(pdd)
def integrate_constant_jerk(self, pddd, dt):
"""
Apply Euler integration for a constant jerk.
Parameters
----------
pddd : array, shape=(3,)
Point jerk in the world frame.
dt : scalar
Duration in [s].
"""
self.set_pos(self.p + dt * (
self.pd + .5 * dt * (self.pdd + dt * pddd / 3.)))
self.set_vel(self.pd + dt * (self.pdd + dt * pddd / 2.))
self.set_accel(self.pdd + dt * pddd)
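# Example (sketch, assumes an OpenRAVE environment is already up via
# get_openrave_env()): integrating a Point under constant gravity with the
# Euler update above. Values are illustrative only.
#
#     p = Point([0., 0., 1.], vel=[0.5, 0., 0.])
#     gravity = array([0., 0., -9.81])
#     for _ in range(100):                      # 100 steps of 10 ms
#         p.integrate_constant_accel(gravity, 1e-2)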
class PointMass(Point):
"""
Point with a mass property and a size proportional to it.
Parameters
----------
pos : (3,) array
Initial position in the world frame.
mass : scalar
Total mass in [kg].
vel : (3,) array, optional
Initial velocity in the world frame.
color : char, optional
Color letter in ['r', 'g', 'b'].
visible : bool, optional
Visibility in the GUI.
size : scalar, optional
Half-length of a side of the CoM cube handle, in [m].
"""
def __init__(self, pos, mass, vel=None, color='r', visible=True,
size=None):
if size is None:
size = max(5e-3, 6e-4 * mass)
super(PointMass, self).__init__(
pos, vel=vel, size=size, color=color, visible=visible)
self.mass = mass
def copy(self, color='r', visible=True):
"""
Copy constructor.
Parameters
----------
color : char, optional
Color of the copy, in ['r', 'g', 'b'].
visible : bool, optional
Should the copy be visible?
"""
return PointMass(
self.p, self.mass, self.pd, color=color, visible=visible)
@property
def momentum(self):
"""Linear momentum in the world frame."""
return self.mass * self.pd
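# Example (sketch, assumes a running environment): a point mass standing in for a
# center of mass, with its linear momentum read back.
#
#     com = PointMass([0., 0., 0.8], mass=38., vel=[0.1, 0., 0.])
#     print com.momentum  # mass * velocity, here roughly [3.8, 0., 0.]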
| gpl-3.0 |
AntonSever/umat | endpoints/advertiser_report_log.py | 1 | 1518 | # -*- coding: utf-8 -*-
from pandas import read_csv
from .advertiser_report import AdvertiserReport
from .params.filter import Field
from .params.v3 import Params
from .service.export import (
Export,
MatExportError
)
from .service.util import print_bold
class AdvertiserReportLog(AdvertiserReport):
def __init__(self,
api_key,
advertiser_id,
export_limit=2000000,
export_delay=30,
export_timeout=1500):
super(AdvertiserReportLog, self).__init__(api_key)
self.advertiser_id = advertiser_id
self.export_limit = export_limit
self.export_delay = export_delay
self.export_timeout = export_timeout
self.params = Params(api_key)
self.params.filter = Field('test_profile_id').is_null()
self.export = Export(
self.export_url,
self.params,
self.export_limit,
self.export_delay,
self.export_timeout
)
def get_dataframe(self):
df = read_csv(self.export.csv_url, parse_dates=True)
df_len = len(df.index)
if __debug__:
print_bold('DataFrame length: {}'.format(df_len))
if df_len >= self.export_limit:
raise MatExportError(
'Number of results exceeds the limit. '
'The data are incomplete. '
'Increase the limit parameter or request data for a lesser period.'
)
return df
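# Example usage (sketch; the API key and advertiser id below are placeholders):
#
#     report = AdvertiserReportLog('my-api-key', advertiser_id=12345)
#     df = report.get_dataframe()  # raises MatExportError if the export hits the limit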
| mit |
shusenl/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
njuwangchen/TravelRec | backend/hack.py | 1 | 15425 |
# coding: utf-8
# In[1]:
import requests, json
from math import radians, cos, sin, asin, sqrt
from datetime import datetime
import random
import time
import cPickle as pickle
# import xgboost as xgb
# import pandas as pd
# In[2]:
# REQUEST POINT OF INTEREST
def requestPoints(city_name):
search_type = "points-of-interest/yapq-search-text"
number = 10
params = "city_name=%s&number_of_images=1&number_of_results=%d" % (city_name, number)
url = "%s/%s?apikey=%s&%s" % (base, search_type, apikey, params)
js = json.loads(sess.get(url).text)
return js
#requestPoints("Chicago")
# In[3]:
# REQUEST FLIGHT
def requestFlights(origin, dest, start_date, end_date, budget):
search_type = "flights/low-fare-search"
number = 5
params = "origin=%s&destination=%s&departure_date=%s&return_date=%s&max_price=%d¤cy=USD&adults=1&non_stop=true&number_of_results=%d" % (origin, dest, start_date, end_date, budget, number)
url = "%s/%s?apikey=%s&%s" % (base, search_type, apikey, params)
js = json.loads(sess.get(url).text)
return js
#requestFlights("BOS", "CHI", "2016-06-01", "2016-06-05", 10000)
# In[4]:
# REQUEST HOTELS NEAR THE AIRPORT
def requestAirportHotels(dest, start_date, end_date, budget):
search_type = "hotels/search-airport"
number = 10
day = abs((datetime.strptime(start_date, "%Y-%m-%d") - datetime.strptime(end_date, "%Y-%m-%d")).days)
params = "location=%s&check_in=%s&check_out=%s&max_rate=%f&number_of_results=%d¤cy=USD" % (dest, start_date, end_date, budget/float(day), number)
url = "%s/%s?apikey=%s&%s" % (base, search_type, apikey, params)
js = json.loads(sess.get(url).text)
return js
#requestAirportHotels("BOS", "2016-06-01", "2016-06-05", 10000)
# In[5]:
# REQUEST GEO HOTELS
def requestGeoHotels(latitude, longitude, radius, start_date, end_date, budget):
search_type = "hotels/search-circle"
number = 10
day = abs((datetime.strptime(start_date, "%Y-%m-%d") - datetime.strptime(end_date, "%Y-%m-%d")).days)
params = "latitude=%f&longitude=%f&radius=%f&check_in=%s&check_out=%s¤cy=USD&max_rate=%f&number_of_results=%d" % (latitude, longitude, radius, start_date, end_date, budget/float(day), number)
url = "%s/%s?apikey=%s&%s" % (base, search_type, apikey, params)
js = json.loads(sess.get(url).text)
return js
#print requestAirportHotels(36.0857, -115.1541, 42, "2016-06-14", "2016-06-16", 200)
# In[6]:
# REQUEST TOP DEST
def requestTopDests(origin, date, number):
search_type = "travel-intelligence/top-destinations"
params = "period=%s&origin=%s&number_of_results=%d" % (date, origin, number)
url = "%s/%s?apikey=%s&%s" % (base, search_type, apikey, params)
js = json.loads(sess.get(url).text)
return js
#print requestTopDests("BOS", "2015-01", 10)
# In[7]:
# REQUEST TOP SEARCHES
def requestTopSearches(origin, date, number):
search_type = "travel-intelligence/top-searches"
params = "period=%s&origin=%s&country=US&number_of_results=%d" % (date, origin, number)
url = "%s/%s?apikey=%s&%s" % (base, search_type, apikey, params)
js = json.loads(sess.get(url).text)
return js
#print requestTopSearches("BOS", "2015-01", 10)
# In[8]:
# REQUEST INSPIRATION FLIGHT
def requestInspirFlight(origin, date0, date1, budget, number):
search_type = "flights/inspiration-search"
params = "origin=%s&departure_date=%s--%s&max_price=%s" % (origin, date0, date1, budget)
url = "%s/%s?apikey=%s&%s" % (base, search_type, apikey, params)
js = json.loads(sess.get(url).text)
return js
#print requestInspirFlight("BOS", "2016-06-14", "2016-06-16", 300, 10)
# In[9]:
# REQUEST CODE TO CITY
def requestCodeToCity(city_code):
if (city_code == "WAS"):
return "Washington"
url = "%s/location/%s?apikey=%s" % (base, city_code, apikey)
js = json.loads(sess.get(url).text)
#print "city_code = ", city_code
return js["airports"][0]["city_name"]
#print requestCodeToCity("BOS")
# In[10]:
# REQUEST POINT OF INTEREST
def requestPoints(city_name):
search_type = "points-of-interest/yapq-search-text"
number = 10
params = "city_name=%s&number_of_images=1&number_of_results=%d" % (city_name, number)
url = "%s/%s?apikey=%s&%s" % (base, search_type, apikey, params)
#print "city_name = ", city_name
js = json.loads(sess.get(url).text)
return js
#requestPoints("Chicago")
# In[11]:
# GENERATE CANDIDATE LIST
def candidatesList(origin, date0, date1, budget):
candidates = set()
l = requestTopDests(origin, "2015-" + date0.split("-")[1], 10)
if (l.has_key("results") == True):
candidates = candidates | {x["destination"] for x in l["results"]}
l = requestTopSearches(origin, "2015-" + date0.split("-")[1], 5)
if (l.has_key("results") == True):
candidates = candidates | {x["destination"] for x in l["results"]}
l = requestInspirFlight(origin, date0, date1, budget, 10)
if (l.has_key("results") == True):
candidates = candidates | {x["destination"] for x in l["results"][:15]}
candidates &= support_code
return candidates
#print candidatesList("BOS", "2016-06-01", "2016-06-17", 1000)
# In[12]:
# CALCULATE DISTANCE
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371 # Radius of earth in kilometers. Use 3956 for miles
return c * r
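# Example (illustrative): distance Boston (42.36 N, 71.06 W) -> Chicago (41.88 N,
# 87.63 W); note the (lon, lat) argument order. The result is roughly 1.4e3 km.
#
#     haversine(-71.06, 42.36, -87.63, 41.88)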
# In[13]:
# CALCULATE SCORES - linear method
# def calulateScoreLinear(feature):
def calulateScore(feature):
feature['average_rate'] = feature['average_rate'] / 5 # 0.2
feature['central_dst'] = 1 - feature['central_dst'] / 50 # 0.2
feature['flight_price'] = 1 - feature['flight_price'] # 0.1
feature['hotel_price'] = 1 - feature['hotel_price'] # 0.1
    # 0.3 -- feature['total_price'] is already normalized to "higher is better"
    # (1 - cost fraction of budget) in getScoreList, so it is not rescaled here
feature['nonstop'] = feature['nonstop'] # 0.1
return feature['average_rate'] * 0.2 + feature['central_dst'] * 0.2 + feature['flight_price'] * 0.2 + feature['hotel_price'] * 0.1 + feature['total_price'] * 0.3 + feature['nonstop'] * 0.1
#return random.random()
# In[14]:
'''
# CALCULATE SCORES - xgb model
# def calulateScoreXGB(feature):
def calulateScore(feature):
feature['average_rate'] = feature['average_rate'] / 5 # 0.2
feature['central_dst'] = 1 - feature['central_dst'] / 50 # 0.2
feature['flight_price'] = 1 - feature['flight_price'] # 0.1
feature['hotel_price'] = 1 - feature['hotel_price'] # 0.1
feature['total_price'] = 1 - feature['average_rate'] # 0.3
feature['nonstop'] = feature['nonstop'] # 0.1
feature_list = [feature]
testData = pd.DataFrame(feature_list)
predicted = xg.predict(xgb.DMatrix(testData[select_features]))
return predicted[0]
#return random.random()
'''
# In[15]:
def getScoreList(origin, start_date, end_date, budget):
dest_list = candidatesList(origin, start_date, end_date, budget)
print "dest_list = ", dest_list
dest_score_list = []
for dest in dest_list:
print "current dest = ", dest
# REQUEST POINTS OF INTERTES
point_js = poi_dict[dest]
if (point_js.has_key("points_of_interest") == False or len(point_js["points_of_interest"]) == 0):
print "num of points_of_interest = 0"
continue
else: print "num of points_of_interest = ", len(point_js["points_of_interest"])
        l = reduce(
            lambda (n1, r1, lo1, la1), (n2, r2, lo2, la2):
                (n1 + n2, r1 + r2, lo1 + lo2, la1 + la2),
            map(lambda x: (1, float(x["grades"]["yapq_grade"]),
                           float(x["location"]["longitude"]),
                           float(x["location"]["latitude"])),
                point_js["points_of_interest"]))
average_rate = l[1] / l[0]
average_lo = l[2] / l[0]
average_la = l[3] / l[0]
# REQUEST HOTEL INFO
hotel_js = requestGeoHotels(average_la, average_lo, 50, start_date, end_date, budget)
if (hotel_js.has_key("results") == False or len(hotel_js["results"]) == 0):
print "num of hotels = 0"
continue
else: print "num of hotels = ", len(hotel_js["results"])
# REQUEST FLIGHT INFO
flight_js = requestFlights(origin, dest, start_date, end_date, budget)
if (flight_js.has_key("results") == False or len(flight_js["results"]) == 0):
print "num of flight = 0"
continue
else: print "num of flight = ", len(flight_js["results"])
score_list = []
for flight in flight_js["results"]:
flightID = "origin=%s,dest=%s,start_date=%s,end_date=%s,outbound=%s,inbound=%s" % (origin, dest, start_date, end_date, flight["itineraries"][0]["outbound"]["flights"][0]["aircraft"], flight["itineraries"][0]["inbound"]["flights"][0]["aircraft"])
#print "flightID =", flightID
flight_dict[flightID] = flight
for hotel in hotel_js["results"]:
hotelID = "dest=%s,start_date=%s,end_date=%s,property_code=%s" % (dest, start_date, end_date, hotel["property_code"])
#print "hotelID =", hotelID
hotel_dict[hotelID] = hotel
feature = {}
# flight feature
feature["flight_price"] = float(flight["fare"]["total_price"]) / budget
if (len(flight["itineraries"][0]["inbound"]["flights"]) != 1 or len(flight["itineraries"][0]["outbound"]["flights"]) != 1):
feature["nonstop"] = 0
else: feature["nonstop"] = 1
# hotel feature
feature["hotel_price"] = float(hotel["total_price"]["amount"]) / budget
# point feature
feature["average_rate"] = average_rate
# combination feature
feature["total_price"] = 1 - (feature["flight_price"] + feature["hotel_price"])
                if feature["total_price"] < 0:  # combined flight + hotel cost exceeds the budget
continue
feature["central_dst"] = 50 - haversine(average_lo, average_la, float(hotel["location"]["longitude"]), float(hotel["location"]["latitude"]))
# Calculate Score
score = calulateScore(feature)
score_list.append((score, flightID, hotelID))
#print "feature = ", feature
#print "score = ", score
score_list = sorted(score_list, key=lambda score: score[0], reverse = True)
if (len(score_list) > 0):
# print "score_list = ", score_list
ret = (score_list[0][0], dest, [score_list[i] for i in range(min(3, len(score_list)))])
# print ret
dest_score_list.append(ret)
#return 0
else:
print "Warning : len(score_list) == 0 ?"
print "Warning : num of points_of_interest = ", len(point_js["points_of_interest"])
print "Warning : num of hotels = ", len(hotel_js["results"])
print "Warning : num of flight = ", len(flight_js["results"])
dest_score_list = sorted(dest_score_list, key=lambda score: score[0], reverse = True)
return dest_score_list
# In[16]:
def formatRet(report):
ret = {}
ret["name"] = requestCodeToCity(report[1])
routes = []
for i in report[2]:
curr = {}
curr["flight"] = flight_dict[i[1]]
curr["hotel"] = hotel_dict[i[2]]
curr["rating"] = i[0]
routes.append(curr)
ret["routes"] = routes
ret["POIs"] = poi_dict[report[1]]["points_of_interest"]
return ret
#ret = formatRet(dest_score_list[0])
#print ret
# In[17]:
def learnPOIList():
# Precessing for just one time
for city_name in support_name:
if city_to_code_dict.has_key(city_name) == False:
continue
city_code = city_to_code_dict[city_name]
poi_dict[city_code] = requestPoints(city_name)
print city_code, " - ", city_name
while (poi_dict[city_code].has_key("points_of_interest") == False or len(poi_dict[city_code]["points_of_interest"]) == 0):
print "Warning: no interest? dest = ", city_code
print poi_dict[city_code]
time.sleep(2)
poi_dict[city_code] = requestPoints(requestCodeToCity(city_code))
pickle.dump(poi_dict, open("/var/www/demo/poi_dict.pkl","wb"))
# In[18]:
def preprocessing():
# BASIC API STAFF
global apikey, sess, base
apikey = "oz0gEI6TQenhtwNMnpI8UU7tYZfHvbAa"
sess = requests.Session()
base = "https://api.sandbox.amadeus.com/v1.2/"
# CITY_TO_CODE_DICT
global city_to_code_dict
city_to_code_dict = {}
with open('/var/www/demo/code.txt') as f:
for l in f:
curr = l.decode("utf-8").split()
city_to_code_dict[" ".join(curr[:-2])] = curr[-2]
f.close()
# SUPPORT_DICT
global support_name, support_code
with open("/var/www/demo/cities.txt") as f:
support_name = {x[:-1].decode("utf-8") for x in f}
support_code = {x for x in map(lambda x : city_to_code_dict[x] if city_to_code_dict.has_key(x) else x, support_name)}
f.close()
# FUNCTIONAL DICT
global flight_dict, hotel_dict, poi_dict
flight_dict, hotel_dict = {}, {}
# learnPOIList()
poi_dict = pickle.load(open("/var/www/demo/poi_dict.pkl","rb"))
# XGB Model
# global xg, select_features
# xg = pickle.load(open("/var/www/demo/xg_model.pkl","rb"))
# select_features = ['average_rate', 'central_dst', 'flight_price', 'hotel_price', 'total_price', 'nonstop']
# In[19]:
# responseRequest API
def responseRequest(city_name, start_date, end_date, budget):
# Error Check
    city_name = city_name.strip().title()
if city_to_code_dict.has_key(city_name) == False:
return {"results" : [], "message" : "We are sorry that we don't support this city"}
if datetime.strptime(start_date, "%Y-%m-%d") >= datetime.strptime(end_date, "%Y-%m-%d"):
return {"results" : [], "message" : "Start date should be earlier than end date"}
# Get messages
origin = city_to_code_dict[city_name]
print "origin = ", origin
dest_score_list = getScoreList(origin, start_date, end_date, budget)
ret = [formatRet(i) for i in dest_score_list]
response = {"results" : ret[:3]}
return response
#responseRequest("CHI", "2016-06-25", "2016-06-28", 1000)
# In[20]:
preprocessing()
# In[21]:
# t = responseRequest("Boston", "2016-06-25", "2016-06-28", 1000)
# t
# In[22]:
# responseRequest("Chicago", "2016-06-25", "2016-03-28", 1000)
# In[ ]:
| apache-2.0 |
pompiduskus/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points' colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
gotomypc/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
bourque/acsql | paper/plot_file_sizes.py | 1 | 2400 | #! /usr/bin/env python
"""Create plot that shows the size of the filesystem over time.
Authors
-------
Matthew Bourque
Use
---
This script is intended to be executed via the command line as
such:
::
python plot_file_sizes.py
Dependencies
------------
- ``matplotlib``
"""
import datetime
import matplotlib.pyplot as plt
def main():
"""The main function."""
# Read in the data
with open('figures/file_sizes.dat', 'r') as f:
data = f.readlines()
data = [item.strip().split(',') for item in data]
dates = [item[0] for item in data]
sizes = [float(item[1]) for item in data]
detectors = [item[2] for item in data]
# Sort the data by date
dates, sizes, detectors = (list(x) for x in zip(*sorted(zip(dates, sizes, detectors))))
# Convert the dates to datetime objects
dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates]
# Get list of aggregate sizes
agg_sizes_all, agg_sizes_wfc, agg_sizes_hrc, agg_sizes_sbc = [], [], [], []
agg_size_all, agg_size_wfc, agg_size_hrc, agg_size_sbc = 0, 0, 0, 0
dates_all, dates_wfc, dates_hrc, dates_sbc = [], [], [], []
for size, date, detector in zip(sizes, dates, detectors):
dates_all.append(date)
agg_size_all += size
agg_sizes_all.append(agg_size_all)
if detector == 'WFC':
dates_wfc.append(date)
agg_size_wfc += size
agg_sizes_wfc.append(agg_size_wfc)
if detector == 'HRC':
dates_hrc.append(date)
agg_size_hrc += size
agg_sizes_hrc.append(agg_size_hrc)
if detector == 'SBC':
dates_sbc.append(date)
agg_size_sbc += size
agg_sizes_sbc.append(agg_size_sbc)
# Plot the data
plt.rcParams['font.size'] = 14
plt.style.use('bmh')
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.set_title('Total Size of acsql Filesystem')
ax.set_ylabel('Size (TB)')
ax.plot(dates_all, agg_sizes_all, linewidth=3, label='Total')
ax.plot(dates_wfc, agg_sizes_wfc, linewidth=3, label='WFC')
ax.plot(dates_hrc, agg_sizes_hrc, linewidth=3, label='HRC')
ax.plot(dates_sbc, agg_sizes_sbc, linewidth=3, label='SBC')
plt.legend()
plt.tight_layout()
plt.savefig('figures/filesystem_size.png')
if __name__ == '__main__':
main()
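# Note on input (inferred from the parsing above, not from original documentation):
# 'figures/file_sizes.dat' is expected to contain one comma-separated record per
# line -- observation date, file size (in TB, to match the axis label), detector --
# for example:
#
#     2004-07-15,0.0021,WFC
#     2004-07-15,0.0007,HRC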
| bsd-3-clause |
pandyag/trading-with-python | lib/bats.py | 78 | 3458 | #-------------------------------------------------------------------------------
# Name: BATS
# Purpose: get data from BATS exchange
#
# Author: jev
#
# Created: 17/08/2013
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import urllib
import re
import pandas as pd
import datetime as dt
import zipfile
import StringIO
from extra import ProgressBar
import os
import yahooFinance as yf
from string import Template
import numpy as np
def fileName2date( fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
m = re.findall('\d+',name)[0]
return dt.datetime.strptime(m,'%Y%m%d').date()
def date2fileName(date):
return 'BATSshvol%s.txt.zip' % date.strftime('%Y%m%d')
def downloadUrl(date):
s = Template('http://www.batstrading.com/market_data/shortsales/$year/$month/$fName-dl?mkt=bzx')
url = s.substitute(fName=date2fileName(date), year=date.year, month='%02d' % date.month)
return url
class BATS_Data(object):
def __init__(self, dataDir):
''' create class. dataDir: directory to which files are downloaded '''
self.dataDir = dataDir
self.shortRatio = None
self._checkDates()
def _checkDates(self):
''' update list of available dataset dates'''
self.dates = []
for fName in os.listdir(self.dataDir):
self.dates.append(fileName2date(fName))
def _missingDates(self):
''' check for missing dates based on spy data'''
print 'Getting yahoo data to determine business dates... ',
spy = yf.getHistoricData('SPY',sDate = (2010,1,1))
busDates = [d.date() for d in spy.index ]
print 'Date range: ', busDates[0] ,'-', busDates[-1]
missing = []
for d in busDates:
if d not in self.dates:
missing.append(d)
return missing
def updateDb(self):
print 'Updating database'
missing = self._missingDates()
for i, date in enumerate(missing):
source = downloadUrl(date)
dest = os.path.join(self.dataDir,date2fileName(date))
if not os.path.exists(dest):
print 'Downloading [%i/%i]' %(i,len(missing)), source
urllib.urlretrieve(source, dest)
else:
print 'x',
print 'Update done.'
self._checkDates()
def loadDate(self,date):
fName = os.path.join(self.dataDir, date2fileName(date))
zipped = zipfile.ZipFile(fName) # open zip file
        lines = zipped.read(zipped.namelist()[0]) # read the first file in the archive
buf = StringIO.StringIO(lines) # create buffer
df = pd.read_csv(buf,sep='|',index_col=1,parse_dates=False,dtype={'Date':object,'Short Volume':np.float32,'Total Volume':np.float32})
s = df['Short Volume']/df['Total Volume']
s.name = dt.datetime.strptime(df['Date'][-1],'%Y%m%d')
return s
def loadData(self):
''' load data from zip files '''
data = []
pb = ProgressBar(len(self.dates)-1)
for idx, date in enumerate(self.dates):
data.append(self.loadDate(date))
pb.animate(idx)
self.shortRatio = pd.DataFrame(data)
return self.shortRatio
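# Example usage (sketch; the data directory is a placeholder and the first
# updateDb() call downloads every missing daily zip file):
#
#     bats = BATS_Data('D:/data/bats')
#     bats.updateDb()
#     shortRatio = bats.loadData()  # short/total volume ratios (rows: dates, columns: tickers)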
| bsd-3-clause |
Karel-van-de-Plassche/QLKNN-develop | tests/gen2_test_files/filtering.py | 1 | 12452 | from __future__ import division
import re
from itertools import product
from IPython import embed
import pandas as pd
import numpy as np
import gc
particle_vars = [u'pf', u'df', u'vt', u'vr', u'vc']
heat_vars = [u'ef']
momentum_vars = [u'vf']
store_format = 'table'
#'vti_GB', 'dfi_GB', 'vci_GB',
# 'pfi_GB', 'efi_GB',
#
# 'efe_GB', 'vce_GB', 'pfe_GB',
# 'vte_GB', 'dfe_GB'
#'chie', 'ven', 'ver', 'vec']
def regime_filter(data, leq, less):
bool = pd.Series(np.full(len(data), True, dtype='bool'), index=data.index)
bool &= (data['efe_GB'] < less) & (data['efi_GB'] < less)
bool &= (data['efe_GB'] >= leq) & (data['efi_GB'] >= leq)
data = data.loc[bool]
return data
div_bounds = {
'efeITG_GB_div_efiITG_GB': (0.05, 1.5),
'pfeITG_GB_div_efiITG_GB': (0.05, 2),
'efeTEM_GB_div_efiTEM_GB': (0.02, 0.5),
'pfeTEM_GB_div_efiTEM_GB': (0.01, 0.8)
}
def div_filter(store):
# This is hand-picked:
# 0.05 < efeITG/efiITG < 1.5
# 0.05 < efiTEM/efeTEM < 2
# 0.02 < abs(pfeITG/efiITG) < 0.5
# 0.01 < abs(pfeTEM/efiTEM) < 0.8
for group in store:
if isinstance(store, pd.HDFStore):
group = group[1:]
pre = len(store[group])
se = store[group]
if group in div_bounds:
low, high = div_bounds[group]
else:
continue
store[group] = se.loc[(low < se) & (se < high)]
print('{:.2f}% of sane {!s:<9} points inside div bounds'.format(np.sum(~store[group].isnull()) / pre * 100, group))
def stability_filter(data):
for col in data.columns:
splitted = re.compile('(?=.*)(.)(|ITG|ETG|TEM)_(GB|SI|cm)').split(col)
if splitted[0] not in heat_vars + particle_vars + momentum_vars:
print('skipping {!s}'.format(col))
continue
if splitted[2] == 'TEM':
gam_filter = 'tem'
elif splitted[2] == 'ITG':
gam_filter = 'itg'
elif splitted[2] == 'ETG':
gam_filter = 'elec'
elif splitted[0] in heat_vars and splitted[1] == 'e':
gam_filter = 'multi'
else:
gam_filter = 'ion'
pre = len(data[col])
if gam_filter == 'ion':
data[col] = data[col].loc[data['gam_leq_GB'] != 0]
elif gam_filter == 'elec':
data[col] = data[col].loc[data['gam_great_GB'] != 0]
elif gam_filter == 'multi':
data[col] = data[col].loc[(data['gam_leq_GB'] != 0) | (data['gam_great_GB'] != 0)]
elif gam_filter == 'tem':
data[col] = data[col].loc[data['TEM']]
elif gam_filter == 'itg':
data[col] = data[col].loc[data['ITG']]
print('{:.2f}% of sane {!s:<9} points unstable at {!s:<5} scale'.format(np.sum(~data[col].isnull()) / pre * 100, col, gam_filter))
return data
def filter_negative(data):
bool = pd.Series(np.full(len(data), True, dtype='bool'), index=data.index)
for col in data.columns:
splitted = re.compile('(?=.*)(.)(|ITG|ETG|TEM)_(GB|SI|cm)').split(col)
if splitted[0] in heat_vars:
bool &= (data[col] >= 0)
elif splitted[0] in particle_vars:
pass
return bool
def filter_ck(data, bound):
return (np.abs(data['cki']) < bound) & (np.abs(data['cke']) < bound)
def filter_totsep(data, septot_factor, startlen=None):
if startlen is None:
startlen = len(data)
bool = pd.Series(np.full(len(data), True, dtype='bool'), index=data.index)
for type, spec in product(particle_vars + heat_vars, ['i', 'e']):
totname = type + spec + '_GB'
if totname != 'vre_GB' and totname != 'vri_GB':
if type in particle_vars or spec == 'i': # no ETG
seps = ['ITG', 'TEM']
else: # All modes
seps = ['ETG', 'ITG', 'TEM']
for sep in seps:
sepname = type + spec + sep + '_GB'
#sepflux += data[sepname]
bool &= np.abs(data[sepname]) <= septot_factor * np.abs(data[totname])
print('After filter {!s:<6} {!s:<6} {:.2f}% left'.format('septot', totname, 100*np.sum(bool)/startlen))
return bool
def filter_ambipolar(data, bound):
return (data['absambi'] < bound) & (data['absambi'] > 1/bound)
def filter_femtoflux(data, bound):
    mode_split = re.compile('(?=.*)(.)(|ITG|ETG|TEM)_(GB|SI|cm)').split
    fluxes = [col for col in data
              if len(mode_split(col)) > 1
              if mode_split(col)[0] in particle_vars + heat_vars + momentum_vars]
absflux = data[fluxes].abs()
return ~((absflux < bound) & (absflux != 0)).any(axis=1)
def sanity_filter(data, ck_bound, septot_factor, ambi_bound, femto_bound, startlen=None):
if startlen is None:
startlen = len(data)
# Throw away point if negative heat flux
data = data.loc[filter_negative(data)]
print('After filter {!s:<13} {:.2f}% left'.format('negative', 100*len(data)/startlen))
gc.collect()
# Throw away point if cke or cki too high
data = data.loc[filter_ck(data, ck_bound)]
print('After filter {!s:<13} {:.2f}% left'.format('ck', 100*len(data)/startlen))
gc.collect()
# Throw away point if sep flux is way higher than tot flux
data = data.loc[filter_totsep(data, septot_factor, startlen=startlen)]
print('After filter {!s:<13} {:.2f}% left'.format('septot', 100*len(data)/startlen))
gc.collect()
data = data.loc[filter_ambipolar(data, ambi_bound)]
print('After filter {!s:<13} {:.2f}% left'.format('ambipolar', 100*len(data)/startlen))
gc.collect()
data = data.loc[filter_femtoflux(data, femto_bound)]
print('After filter {!s:<13} {:.2f}% left'.format('femtoflux', 100*len(data)/startlen))
gc.collect()
# Alternatively:
#data = data.loc[filter_negative(data) & filter_ck(data, ck_bound) & filter_totsep(data, septot_factor)]
return data
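# Example (sketch): the same filter chain as in the __main__ block below,
# i.e. ck bound 50, septot factor 1.5, ambipolarity bound 1.5, femtoflux 1e-4,
# followed by the 0 <= flux < 100 regime cut.
#
#     data = sanity_filter(data, 50, 1.5, 1.5, 1e-4)
#     data = regime_filter(data, 0, 100)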
#for col in data.columns:
# splitted = re.compile('(?=.*)(.)(|ITG|ETG|TEM)_(GB|SI|cm)').split(col)
# if splitted[0] in particle_vars + heat_vars:
# if splitted[2] != '':
# data.loc[]
def separate_to_store(input, data, const, storename):
store = pd.HDFStore(storename)
store['input'] = input.loc[data.index]
for col in data:
splitted = re.compile('(?=.*)(.)(|ITG|ETG|TEM)_(GB|SI|cm)').split(col)
if splitted[0] in heat_vars + particle_vars + momentum_vars + ['gam_leq_GB', 'gam_less_GB']:
store.put(col, data[col].dropna(), format=store_format)
store.put('constants', const)
store.close()
def create_divsum(store):
for group in store:
if isinstance(store, pd.HDFStore):
group = group[1:]
splitted = re.compile('(?=.*)(.)(|ITG|ETG|TEM)(_GB|SI|cm)').split(group)
if splitted[0] in heat_vars and splitted[1] == 'i' and len(splitted) == 5:
group2 = splitted[0] + 'e' + ''.join(splitted[2:])
sets = [('_'.join([group, 'plus', group2]),
store[group] + store[group2]),
('_'.join([group, 'div', group2]),
store[group] / store[group2]),
('_'.join([group2, 'div', group]),
store[group2] / store[group])
]
elif splitted[0] == 'pf' and splitted[1] == 'e' and len(splitted) == 5:
group2 = 'efi' + ''.join(splitted[2:])
group3 = 'efe' + ''.join(splitted[2:])
sets = [
('_'.join([group, 'plus', group2, 'plus', group3]),
store[group] + store[group2] + store[group3]),
('_'.join([group, 'div', group2]),
store[group] / store[group2]),
('_'.join([group, 'div', group3]),
store[group] / store[group3])
]
else:
continue
for name, set in sets:
set.name = name
if isinstance(store, pd.HDFStore):
store.put(set.name, set, format=store_format)
else:
store[set.name] = set
return store
def filter_9D_to_7D(input, Zeffx=1, Nustar=1e-3):
if len(input.columns) != 9:
print("Warning! This function assumes 9D input with ['Ati', 'Ate', 'An', 'qx', 'smag', 'x', 'Ti_Te', 'Zeffx', 'Nustar']")
idx = input.index[(
np.isclose(input['Zeffx'], Zeffx, atol=1e-5, rtol=1e-3) &
np.isclose(input['Nustar'], Nustar, atol=1e-5, rtol=1e-3)
)]
return idx
def filter_7D_to_4D(input, Ate=6.5, An=2, x=0.45):
if len(input.columns) != 7:
print("Warning! This function assumes 9D input with ['Ati', 'Ate', 'An', 'qx', 'smag', 'x', 'Ti_Te']")
idx = input.index[(
np.isclose(input['Ate'], Ate, atol=1e-5, rtol=1e-3) &
np.isclose(input['An'], An, atol=1e-5, rtol=1e-3) &
np.isclose(input['x'], x, atol=1e-5, rtol=1e-3)
)]
return idx
def split_input(input, const):
idx = {}
consts = {9: const.copy(),
7: const.copy(),
4: const.copy()}
idx[7] = filter_9D_to_7D(input)
inputs = {9: input}
idx[9] = input.index
inputs[7] = input.loc[idx[7]]
for name in ['Zeffx', 'Nustar']:
consts[7][name] = inputs[7].head(1)[name]
inputs[7].drop(['Zeffx', 'Nustar'], axis='columns', inplace=True)
idx[4] = filter_7D_to_4D(inputs[7])
inputs[4] = inputs[7].loc[idx[4]]
for name in ['Ate', 'An', 'x']:
consts[4][name] = inputs[4].head(1)[name]
inputs[4].drop(['Ate', 'An', 'x'], axis='columns', inplace=True)
return idx, inputs, consts
def split_sane(input, data, const):
idx, inputs, consts = split_input(input, const)
for dim in [7, 4]:
print('splitting', dim)
store = pd.HDFStore('sane_' + 'gen2_' + str(dim) + 'D_nions0_flat' + '_filter' + str(filter_num) + '.h5')
store['/megarun1/flattened'] = data.loc[idx[dim]]
store['/megarun1/input'] = inputs[dim]
store['/megarun1/constants'] = consts[dim]
store.close()
def split_subsets(input, data, const, frac=0.1):
idx, inputs, consts = split_input(input, const)
rand_index = pd.Int64Index(np.random.permutation(input.index))
sep_index = int(frac * len(rand_index))
idx['test'] = rand_index[:sep_index]
idx['training'] = rand_index[sep_index:]
for dim, set in product([9, 7, 4], ['test', 'training']):
print(dim, set)
store = pd.HDFStore(set + '_' + 'gen2_' + str(dim) + 'D_nions0_flat.h5')
store['/megarun1/flattened'] = data.loc[idx[dim] & idx[set]]
store['/megarun1/input'] = inputs[dim].loc[idx[set]]
store['/megarun1/constants'] = consts[dim]
store.close()
if __name__ == '__main__':
dim = 9
store_name = ''.join(['gen2_', str(dim), 'D_nions0_flat'])
store = pd.HDFStore('../' + store_name + '.h5', 'r')
input = store['/megarun1/input']
data = store['/megarun1/flattened']
startlen = len(data)
data = sanity_filter(data, 50, 1.5, 1.5, 1e-4, startlen=startlen)
data = regime_filter(data, 0, 100)
gc.collect()
input = input.loc[data.index]
print('After filter {!s:<13} {:.2f}% left'.format('regime', 100*len(data)/startlen))
filter_num = 7
sane_store = pd.HDFStore('../sane_' + store_name + '_filter' + str(filter_num) + '.h5')
sane_store['/megarun1/input'] = input
sane_store['/megarun1/flattened'] = data
const = sane_store['/megarun1/constants'] = store['/megarun1/constants']
#input = sane_store['/megarun1/input']
#data = sane_store['/megarun1/flattened']
#const = sane_store['/megarun1/constants']
split_sane(input, data, const)
sane_store.close()
split_subsets(input, data, const, frac=0.1)
del data, input, const
gc.collect()
for dim, set in product([4, 7, 9], ['test', 'training']):
print(dim, set)
basename = set + '_' + 'gen2_' + str(dim) + 'D_nions0_flat.h5'
store = pd.HDFStore(basename)
data = store['/megarun1/flattened']
input = store['/megarun1/input']
const = store['/megarun1/constants']
gam = data['gam_leq_GB']
gam = gam[gam != 0]
data = stability_filter(data)
        data['gam_leq_GB'] = gam  # reinsert the non-zero growth rates for separate_to_store
separate_to_store(input, data, const, 'unstable_' + basename)
#separate_to_store(input, data, '../filtered_' + store_name + '_filter6')
| mit |
jjx02230808/project0223 | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
Lawrence-Liu/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer;
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
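# One possible solution, kept commented out on purpose since this file is the
# exercise skeleton (the vectorizer settings are a reasonable choice, not the
# only one):
#
#     vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
#     clf = Pipeline([('vec', vectorizer), ('clf', Perceptron())])
#     clf.fit(docs_train, y_train)
#     y_predicted = clf.predict(docs_test)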
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
hunter-cameron/CheckM | checkm/plot/tetraDistPlots.py | 2 | 7670 | ###############################################################################
#
# gcPlots.py - Create a GC histogram and delta-GC plot.
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import matplotlib.pyplot as pylab
import numpy as np
from AbstractPlot import AbstractPlot
from checkm.util.seqUtils import readFasta
from checkm.common import readDistribution, findNearest
from checkm.genomicSignatures import GenomicSignatures
from checkm.binTools import BinTools
class TetraDistPlots(AbstractPlot):
def __init__(self, options):
AbstractPlot.__init__(self, options)
def plot(self, fastaFile, tetraSigs, distributionsToPlot):
# Set size of figure
self.fig.clear()
self.fig.set_size_inches(self.options.width, self.options.height)
axesHist = self.fig.add_subplot(121)
axesDeltaTD = self.fig.add_subplot(122)
self.plotOnAxes(fastaFile, tetraSigs, distributionsToPlot, axesHist, axesDeltaTD)
self.fig.tight_layout(pad=1, w_pad=1)
self.draw()
def plotOnAxes(self, fastaFile, tetraSigs, distributionsToPlot, axesHist, axesDeltaTD):
# Read reference distributions from file
dist = readDistribution('td_dist')
# get tetranucleotide signature for bin
seqs = readFasta(fastaFile)
binTools = BinTools()
binSig = binTools.binTetraSig(seqs, tetraSigs)
# get tetranucleotide distances for windows
genomicSig = GenomicSignatures(K=4, threads=1)
data = []
seqLens = []
deltaTDs = []
for seqId, seq in seqs.iteritems():
start = 0
end = self.options.td_window_size
seqLen = len(seq)
seqLens.append(seqLen)
deltaTDs.append(genomicSig.distance(tetraSigs[seqId], binSig))
while(end < seqLen):
windowSig = genomicSig.seqSignature(seq[start:end])
data.append(genomicSig.distance(windowSig, binSig))
start = end
end += self.options.td_window_size
if len(data) == 0:
axesHist.set_xlabel('[Error] No seqs >= %d, the specified window size' % self.options.td_window_size)
return
deltaTDs = np.array(deltaTDs)
# Histogram plot
bins = [0.0]
binWidth = self.options.td_bin_width
binEnd = binWidth
while binEnd <= 1.0:
bins.append(binEnd)
binEnd += binWidth
axesHist.hist(data, bins=bins, normed=True, color=(0.5, 0.5, 0.5))
axesHist.set_xlabel(r'$\Delta$ TD')
axesHist.set_ylabel('% windows (' + str(self.options.td_window_size) + ' bp)')
# Prettify plot
for a in axesHist.yaxis.majorTicks:
a.tick1On = True
a.tick2On = False
for a in axesHist.xaxis.majorTicks:
a.tick1On = True
a.tick2On = False
for line in axesHist.yaxis.get_ticklines():
line.set_color(self.axesColour)
for line in axesHist.xaxis.get_ticklines():
line.set_color(self.axesColour)
for loc, spine in axesHist.spines.iteritems():
if loc in ['right', 'top']:
spine.set_color('none')
else:
spine.set_color(self.axesColour)
        # get TD bin statistics
meanTD, deltaTDs = binTools.tetraDiffDist(seqs, genomicSig, tetraSigs, binSig)
# Delta-TD vs Sequence length plot
axesDeltaTD.scatter(deltaTDs, seqLens, c=abs(deltaTDs), s=10, lw=0.5, cmap=pylab.cm.Greys)
axesDeltaTD.set_xlabel(r'$\Delta$ TD (mean TD = %.2f)' % meanTD)
axesDeltaTD.set_ylabel('Sequence length (kbp)')
_, yMaxSeqs = axesDeltaTD.get_ylim()
xMinSeqs, xMaxSeqs = axesDeltaTD.get_xlim()
# plot reference distributions
for distToPlot in distributionsToPlot:
boundKey = findNearest(dist[dist.keys()[0]].keys(), distToPlot)
x = []
y = []
for windowSize in dist:
x.append(dist[windowSize][boundKey])
y.append(windowSize)
# sort by y-values
sortIndexY = np.argsort(y)
x = np.array(x)[sortIndexY]
y = np.array(y)[sortIndexY]
# make sure x-values are strictly decreasing as y increases
# as this is conservative and visually satisfying
for i in xrange(0, len(x) - 1):
for j in xrange(i + 1, len(x)):
if x[j] > x[i]:
if j == len(x) - 1:
x[j] = x[i]
else:
x[j] = (x[j - 1] + x[j + 1]) / 2 # interpolate values from neighbours
if x[j] > x[i]:
x[j] = x[i]
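            # Worked illustration of the smoothing above (values made up):
            # x = [5, 3, 4, 2] -> the 4 breaks monotonicity w.r.t. x[1] = 3, so
            # it is replaced by the mean of its neighbours, (3 + 2) / 2 = 2.5,
            # giving x = [5, 3, 2.5, 2], which decreases as y increases.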
axesDeltaTD.plot(x, y, 'r--', lw=0.5, zorder=0)
# ensure y-axis include zero and covers all sequences
axesDeltaTD.set_ylim([0, yMaxSeqs])
# ensure x-axis is set appropriately for sequences
axesDeltaTD.set_xlim([xMinSeqs, xMaxSeqs])
# draw vertical line at x=0
axesDeltaTD.vlines(0, 0, yMaxSeqs, linestyle='dashed', color=self.axesColour, zorder=0)
# Change sequence lengths from bp to kbp
yticks = axesDeltaTD.get_yticks()
kbpLabels = []
for seqLen in yticks:
label = '%.1f' % (float(seqLen) / 1000)
label = label.replace('.0', '') # remove trailing zero
kbpLabels.append(label)
axesDeltaTD.set_yticklabels(kbpLabels)
# Prettify plot
for a in axesDeltaTD.yaxis.majorTicks:
a.tick1On = True
a.tick2On = False
for a in axesDeltaTD.xaxis.majorTicks:
a.tick1On = True
a.tick2On = False
for line in axesDeltaTD.yaxis.get_ticklines():
line.set_color(self.axesColour)
for line in axesDeltaTD.xaxis.get_ticklines():
line.set_color(self.axesColour)
for loc, spine in axesDeltaTD.spines.iteritems():
if loc in ['right', 'top']:
spine.set_color('none')
else:
spine.set_color(self.axesColour)
| gpl-3.0 |
arokem/nipy | examples/labs/need_data/histogram_fits.py | 4 | 2055 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
"""
Example of a script that performs histogram analysis of an activation image.
This is based on a real fMRI image.
Simply modify the input image path to make it work on your preferred image.
Needs matplotlib
Author : Bertrand Thirion, 2008-2009
"""
import os
import numpy as np
import scipy.stats as st
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nibabel import load
import nipy.algorithms.statistics.empirical_pvalue as en
# Local import
from get_data_light import DATA_DIR, get_second_level_dataset
# parameters
verbose = 1
theta = float(st.t.isf(0.01, 100))
# paths
mask_image = os.path.join(DATA_DIR, 'mask.nii.gz')
input_image = os.path.join(DATA_DIR, 'spmT_0029.nii.gz')
if (not os.path.exists(mask_image)) or (not os.path.exists(input_image)):
get_second_level_dataset()
# Read the mask
nim = load(mask_image)
mask = nim.get_data()
# read the functional image
rbeta = load(input_image)
beta = rbeta.get_data()
beta = beta[mask > 0]
mf = plt.figure(figsize=(13, 5))
a1 = plt.subplot(1, 3, 1)
a2 = plt.subplot(1, 3, 2)
a3 = plt.subplot(1, 3, 3)
# fit beta's histogram with a Gamma-Gaussian mixture
bfm = np.array([2.5, 3.0, 3.5, 4.0, 4.5])
bfp = en.gamma_gaussian_fit(beta, bfm, verbose=1, mpaxes=a1)
# fit beta's histogram with a mixture of Gaussians
alpha = 0.01
pstrength = 100
bfq = en.three_classes_GMM_fit(beta, bfm, alpha, pstrength,
verbose=1, mpaxes=a2)
# fit the null mode of beta with the robust method
efdr = en.NormalEmpiricalNull(beta)
efdr.learn()
efdr.plot(bar=0, mpaxes=a3)
a1.set_title('Fit of the density with \n a Gamma-Gaussian mixture')
a2.set_title('Fit of the density with \n a mixture of Gaussians')
a3.set_title('Robust fit of the density \n with a single Gaussian')
plt.show()
| bsd-3-clause |
QuantEcon/QuantEcon.py | quantecon/tests/test_quad.py | 2 | 16491 | """
Tests for quad.py
Notes
-----
Many of tests were derived from the file demqua## in the CompEcon
toolbox.
For all other tests, the MATLAB code is provided here in
a section of comments.
"""
import os
import unittest
from scipy.io import loadmat
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from quantecon.quad import (
qnwcheb, qnwequi, qnwlege, qnwnorm, qnwlogn,
qnwsimp, qnwtrap, qnwunif, quadrect, qnwbeta,
qnwgamma
)
from quantecon.tests.util import get_data_dir
### MATLAB code needed to generate data (in addition to a modified demqua03)
# % set random number seed so we get the same random nums as in python
# rng(42)
# % 1-d parameters -- just some random numbers
# a = -2.0
# b = 3.0
# n = 11
# % 3-d parameters -- just some random numbers
# a_3 = [-1.0 -2.0 1.0]
# b_3 = [1.0 12.0 1.5]
# n_3 = [7 5 9]
# mu_3d = [1.0 2.0 2.5]
# sigma2_3d = [1.0 0.1 0.0; 0.1 1.0 0.0; 0.0 0.0 1.2]
# % 1-d nodes and weights
# [x_cheb_1 w_cheb_1] = qnwcheb(n, a, b)
# [x_equiN_1 w_equiN_1] = qnwequi(n, a, b, 'N')
# [x_equiW_1 w_equiW_1] = qnwequi(n, a, b, 'W')
# [x_equiH_1 w_equiH_1] = qnwequi(n, a, b, 'H')
# rng(41); [x_equiR_1 w_equiR_1] = qnwequi(n, a, b, 'R')
# [x_lege_1 w_lege_1] = qnwlege(n, a, b)
# [x_norm_1 w_norm_1] = qnwnorm(n, a, b)
# [x_logn_1 w_logn_1] = qnwlogn(n, a, b)
# [x_simp_1 w_simp_1] = qnwsimp(n, a, b)
# [x_trap_1 w_trap_1] = qnwtrap(n, a, b)
# [x_unif_1 w_unif_1] = qnwunif(n, a, b)
# [x_beta_1 w_beta_1] = qnwbeta(n, b, b+1)
# [x_gamm_1 w_gamm_1] = qnwgamma(n, b)
# % 3-d nodes and weights
# [x_cheb_3 w_cheb_3] = qnwcheb(n_3, a_3, b_3)
# rng(42); [x_equiN_3 w_equiN_3] = qnwequi(n_3, a_3, b_3, 'N')
# [x_equiW_3 w_equiW_3] = qnwequi(n_3, a_3, b_3, 'W')
# [x_equiH_3 w_equiH_3] = qnwequi(n_3, a_3, b_3, 'H')
# [x_equiR_3 w_equiR_3] = qnwequi(n_3, a_3, b_3, 'R')
# [x_lege_3 w_lege_3] = qnwlege(n_3, a_3, b_3)
# [x_norm_3 w_norm_3] = qnwnorm(n_3, mu_3d, sigma2_3d)
# [x_logn_3 w_logn_3] = qnwlogn(n_3, mu_3d, sigma2_3d)
# [x_simp_3 w_simp_3] = qnwsimp(n_3, a_3, b_3)
# [x_trap_3 w_trap_3] = qnwtrap(n_3, a_3, b_3)
# [x_unif_3 w_unif_3] = qnwunif(n_3, a_3, b_3)
# [x_beta_3 w_beta_3] = qnwbeta(n_3, b_3, b_3+1.0)
# [x_gamm_3 w_gamm_3] = qnwgamma(n_3, b_3)
### End MATLAB commands
data_dir = get_data_dir()
data = loadmat(os.path.join(data_dir, "matlab_quad.mat"), squeeze_me=True)
# Unpack parameters from MATLAB
a = data['a']
b = data['b']
n = data['n']
a_3 = data['a_3']
b_3 = data['b_3']
n_3 = data['n_3']
mu_3d = data['mu_3d']
sigma2_3d = data['sigma2_3d']
class TestQuadrect(unittest.TestCase):
@classmethod
def setUpClass(cls):
## Create Python Data for quadrect
# Create the python data -- similar to notebook code
kinds = ["trap", "simp", "lege", "N", "W", "H", "R"]
# Define some functions
f1 = lambda x: np.exp(-x)
f2 = lambda x: 1.0 / (1.0 + 25.0 * x**2.0)
f3 = lambda x: np.abs(x) ** 0.5
func_names = ["f1", "f2", "f3"]
# Integration parameters
n = np.array([5, 11, 21, 51, 101, 401]) # number of nodes
np.random.seed(42) # same seed as ML code.
a, b = -1, 1 # endpoints
# Set up pandas DataFrame to hold results
ind = pd.MultiIndex.from_product([func_names, n])
ind.names = ["Function", "Number of Nodes"]
cols = pd.Index(kinds, name="Kind")
quad_rect_res1d = pd.DataFrame(index=ind, columns=cols, dtype=float)
for i, func in enumerate([f1, f2, f3]):
func_name = func_names[i]
for kind in kinds:
for num in n:
num_in = num ** 2 if len(kind) == 1 else num
quad_rect_res1d.loc[func_name, num][kind] = quadrect(func,
num_in,
a, b,
kind)
cls.data1d = quad_rect_res1d
# Now 2d data
kinds2 = ["lege", "trap", "simp", "N", "W", "H", "R"]
f1_2 = lambda x: np.exp(x[:, 0] + x[:, 1])
f2_2 = lambda x: np.exp(-x[:, 0] * np.cos(x[:, 1]**2))
# Set up pandas DataFrame to hold results
a = ([0, 0], [-1, -1])
b = ([1, 2], [1, 1])
ind_2 = pd.Index(n**2, name="Num Points")
cols2 = pd.Index(kinds2, name="Kind")
data2 = pd.DataFrame(index=ind_2, columns=cols2, dtype=float)
for num in n:
for kind in kinds2[:4]:
data2.loc[num**2][kind] = quadrect(f1_2, [num, num],
a[0], b[0], kind)
for kind in kinds2[4:]:
data2.loc[num**2][kind] = quadrect(f1_2, num**2, a[0],
b[0], kind)
cls.data2d1 = data2
n3 = 10 ** (2 + np.array([1, 2, 3]))
ind_3 = pd.Index(n3, name="Num Points")
cols3 = pd.Index(kinds2[3:])
data3 = pd.DataFrame(index=ind_3, columns=cols3, dtype=float)
for num in n3:
for kind in kinds2[3:]:
data3.loc[num][kind] = quadrect(f2_2, num, a[1], b[1], kind)
cls.data2d2 = data3
## Organize MATLAB Data
ml_data = pd.DataFrame(index=ind, columns=cols, dtype=float)
ml_data.iloc[:6, :] = data['int_1d'][:, :, 0]
ml_data.iloc[6:12, :] = data['int_1d'][:, :, 1]
ml_data.iloc[12:18, :] = data['int_1d'][:, :, 2]
ml_data2 = pd.DataFrame(index=ind_2, columns=cols2, dtype=float)
ml_data2.iloc[:, :] = data['int_2d1']
ml_data3 = pd.DataFrame(index=ind_3, columns=cols3, dtype=float)
ml_data3.iloc[:, :] = data['int_2d2']
cls.ml_data1d = ml_data
cls.ml_data2d1 = ml_data2
cls.ml_data2d2 = ml_data3
def test_quadrect_1d_lege(self):
assert_allclose(self.data1d['lege'], self.ml_data1d['lege'])
def test_quadrect_1d_trap(self):
assert_allclose(self.data1d['trap'], self.ml_data1d['trap'])
def test_quadrect_1d_simp(self):
assert_allclose(self.data1d['simp'], self.ml_data1d['simp'])
def test_quadrect_1d_R(self):
assert_allclose(self.data1d['R'], self.ml_data1d['R'])
def test_quadrect_1d_W(self):
assert_allclose(self.data1d['W'], self.ml_data1d['W'])
def test_quadrect_1d_N(self):
assert_allclose(self.data1d['N'], self.ml_data1d['N'])
def test_quadrect_1d_H(self):
assert_allclose(self.data1d['H'], self.ml_data1d['H'])
def test_quadrect_2d_lege(self):
assert_allclose(self.data2d1['lege'], self.ml_data2d1['lege'])
def test_quadrect_2d_trap(self):
assert_allclose(self.data2d1['trap'], self.ml_data2d1['trap'])
def test_quadrect_2d_simp(self):
assert_allclose(self.data2d1['simp'], self.ml_data2d1['simp'])
# NOTE: The R tests will fail in more than 1 dimension. This is a
# function of MATLAB and numpy storing arrays in different
# "order". See comment in TestQnwequiR.setUpClass
# def test_quadrect_2d_R(self):
# assert_allclose(self.data2d1['R'], self.ml_data2d1['R'])
def test_quadrect_2d_W(self):
assert_allclose(self.data2d1['W'], self.ml_data2d1['W'])
def test_quadrect_2d_N(self):
assert_allclose(self.data2d1['N'], self.ml_data2d1['N'])
def test_quadrect_2d_H(self):
assert_allclose(self.data2d1['H'], self.ml_data2d1['H'])
def test_quadrect_2d_W2(self):
assert_allclose(self.data2d2['W'], self.ml_data2d2['W'])
def test_quadrect_2d_N2(self):
assert_allclose(self.data2d2['N'], self.ml_data2d2['N'])
def test_quadrect_2d_H2(self):
assert_allclose(self.data2d2['H'], self.ml_data2d2['H'])
class TestQnwcheb(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_cheb_1, cls.w_cheb_1 = qnwcheb(n, a, b)
cls.x_cheb_3, cls.w_cheb_3 = qnwcheb(n_3, a_3, b_3)
def test_qnwcheb_nodes_1d(self):
assert_allclose(self.x_cheb_1, data['x_cheb_1'])
def test_qnwcheb_nodes_3d(self):
assert_allclose(self.x_cheb_3, data['x_cheb_3'])
def test_qnwcheb_weights_1d(self):
assert_allclose(self.w_cheb_1, data['w_cheb_1'])
def test_qnwcheb_weights_3d(self):
assert_allclose(self.w_cheb_3, data['w_cheb_3'])
class TestQnwequiN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_equiN_1, cls.w_equiN_1 = qnwequi(n, a, b, "N")
cls.x_equiN_3, cls.w_equiN_3 = qnwequi(n_3, a_3, b_3, "N")
def test_qnwequiN_nodes_1d(self):
assert_allclose(self.x_equiN_1, data['x_equiN_1'])
def test_qnwequiN_nodes_3d(self):
assert_allclose(self.x_equiN_3, data['x_equiN_3'])
def test_qnwequiN_weights_1d(self):
assert_allclose(self.w_equiN_1, data['w_equiN_1'])
def test_qnwequiN_weights_3d(self):
assert_allclose(self.w_equiN_3, data['w_equiN_3'])
class TestQnwequiW(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_equiW_1, cls.w_equiW_1 = qnwequi(n, a, b, "W")
cls.x_equiW_3, cls.w_equiW_3 = qnwequi(n_3, a_3, b_3, "W")
def test_qnwequiW_nodes_1d(self):
assert_allclose(self.x_equiW_1, data['x_equiW_1'])
def test_qnwequiW_nodes_3d(self):
assert_allclose(self.x_equiW_3, data['x_equiW_3'])
def test_qnwequiW_weights_1d(self):
assert_allclose(self.w_equiW_1, data['w_equiW_1'])
def test_qnwequiW_weights_3d(self):
assert_allclose(self.w_equiW_3, data['w_equiW_3'])
class TestQnwequiH(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_equiH_1, cls.w_equiH_1 = qnwequi(n, a, b, "H")
cls.x_equiH_3, cls.w_equiH_3 = qnwequi(n_3, a_3, b_3, "H")
def test_qnwequiH_nodes_1d(self):
assert_allclose(self.x_equiH_1, data['x_equiH_1'])
def test_qnwequiH_nodes_3d(self):
assert_allclose(self.x_equiH_3, data['x_equiH_3'])
def test_qnwequiH_weights_1d(self):
assert_allclose(self.w_equiH_1, data['w_equiH_1'])
def test_qnwequiH_weights_3d(self):
assert_allclose(self.w_equiH_3, data['w_equiH_3'])
class TestQnwequiR(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_equiR_1, cls.w_equiR_1 = qnwequi(n, a, b, "R", random_state=41)
temp, cls.w_equiR_3 = qnwequi(n_3, a_3, b_3, "R", random_state=42)
# NOTE: I need to do a little magic here. MATLAB and numpy
# are generating the same random numbers, but MATLAB is
# column major and numpy is row major, so they are stored
# in different places for multi-dimensional arrays.
# The ravel, reshape code here moves the numpy nodes into
# the same relative position as the MATLAB ones. Also, in
# order for this to work I have to undo the shifting of
# the nodes, re-organize data, then re-shift. If this
# seems like voodoo to you, it kinda is. But, the fact
# that the test can pass after this kind of manipulation
# is a strong indicator that we are doing it correctly
unshifted = (temp - a_3) / (b_3 - a_3)
reshaped = np.ravel(unshifted).reshape(315, 3, order='F')
reshifted = a_3 + reshaped * (b_3 - a_3)
cls.x_equiR_3 = reshifted
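    # A small illustration of the ordering issue described above (values are
    # illustrative only, not taken from the test data):
    #   np.arange(6).reshape(3, 2, order='C') -> [[0, 1], [2, 3], [4, 5]]  (NumPy default, row major)
    #   np.arange(6).reshape(3, 2, order='F') -> [[0, 3], [1, 4], [2, 5]]  (MATLAB, column major)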
def test_qnwequiR_nodes_1d(self):
assert_allclose(self.x_equiR_1, data['x_equiR_1'])
def test_qnwequiR_nodes_3d(self):
assert_allclose(self.x_equiR_3, data['x_equiR_3'])
def test_qnwequiR_weights_1d(self):
assert_allclose(self.w_equiR_1, data['w_equiR_1'])
def test_qnwequiR_weights_3d(self):
assert_allclose(self.w_equiR_3, data['w_equiR_3'])
class TestQnwlege(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_lege_1, cls.w_lege_1 = qnwlege(n, a, b)
cls.x_lege_3, cls.w_lege_3 = qnwlege(n_3, a_3, b_3)
def test_qnwlege_nodes_1d(self):
assert_allclose(self.x_lege_1, data['x_lege_1'])
def test_qnwlege_nodes_3d(self):
assert_allclose(self.x_lege_3, data['x_lege_3'])
def test_qnwlege_weights_1d(self):
assert_allclose(self.w_lege_1, data['w_lege_1'])
def test_qnwlege_weights_3d(self):
assert_allclose(self.w_lege_3, data['w_lege_3'])
class TestQnwnorm(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_norm_1, cls.w_norm_1 = qnwnorm(n, a, b)
cls.x_norm_3, cls.w_norm_3 = qnwnorm(n_3, mu_3d, sigma2_3d)
def test_qnwnorm_nodes_1d(self):
assert_allclose(self.x_norm_1, data['x_norm_1'])
def test_qnwnorm_nodes_3d(self):
assert_allclose(self.x_norm_3, data['x_norm_3'])
def test_qnwnorm_weights_1d(self):
assert_allclose(self.w_norm_1, data['w_norm_1'])
def test_qnwnorm_weights_3d(self):
assert_allclose(self.w_norm_3, data['w_norm_3'])
class TestQnwlogn(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_logn_1, cls.w_logn_1 = qnwlogn(n, a, b)
cls.x_logn_3, cls.w_logn_3 = qnwlogn(n_3, mu_3d, sigma2_3d)
def test_qnwlogn_nodes_1d(self):
assert_allclose(self.x_logn_1, data['x_logn_1'])
def test_qnwlogn_nodes_3d(self):
assert_allclose(self.x_logn_3, data['x_logn_3'])
def test_qnwlogn_weights_1d(self):
assert_allclose(self.w_logn_1, data['w_logn_1'])
def test_qnwlogn_weights_3d(self):
assert_allclose(self.w_logn_3, data['w_logn_3'])
class TestQnwsimp(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_simp_1, cls.w_simp_1 = qnwsimp(n, a, b)
cls.x_simp_3, cls.w_simp_3 = qnwsimp(n_3, a_3, b_3)
def test_qnwsimp_nodes_1d(self):
assert_allclose(self.x_simp_1, data['x_simp_1'])
def test_qnwsimp_nodes_3d(self):
assert_allclose(self.x_simp_3, data['x_simp_3'])
def test_qnwsimp_weights_1d(self):
assert_allclose(self.w_simp_1, data['w_simp_1'])
def test_qnwsimp_weights_3d(self):
assert_allclose(self.w_simp_3, data['w_simp_3'])
class TestQnwtrap(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_trap_1, cls.w_trap_1 = qnwtrap(n, a, b)
cls.x_trap_3, cls.w_trap_3 = qnwtrap(n_3, a_3, b_3)
def test_qnwtrap_nodes_1d(self):
assert_allclose(self.x_trap_1, data['x_trap_1'])
def test_qnwtrap_nodes_3d(self):
assert_allclose(self.x_trap_3, data['x_trap_3'])
def test_qnwtrap_weights_1d(self):
assert_allclose(self.w_trap_1, data['w_trap_1'])
def test_qnwtrap_weights_3d(self):
assert_allclose(self.w_trap_3, data['w_trap_3'])
class TestQnwunif(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_unif_1, cls.w_unif_1 = qnwunif(n, a, b)
cls.x_unif_3, cls.w_unif_3 = qnwunif(n_3, a_3, b_3)
def test_qnwunif_nodes_1d(self):
assert_allclose(self.x_unif_1, data['x_unif_1'])
def test_qnwunif_nodes_3d(self):
assert_allclose(self.x_unif_3, data['x_unif_3'])
def test_qnwunif_weights_1d(self):
assert_allclose(self.w_unif_1, data['w_unif_1'])
def test_qnwunif_weights_3d(self):
assert_allclose(self.w_unif_3, data['w_unif_3'])
class TestQnwbeta(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_beta_1, cls.w_beta_1 = qnwbeta(n, b, b + 1.0)
cls.x_beta_3, cls.w_beta_3 = qnwbeta(n_3, b_3, b_3 + 1.0)
def test_qnwbeta_nodes_1d(self):
assert_allclose(self.x_beta_1, data['x_beta_1'])
def test_qnwbeta_nodes_3d(self):
assert_allclose(self.x_beta_3, data['x_beta_3'])
def test_qnwbeta_weights_1d(self):
assert_allclose(self.w_beta_1, data['w_beta_1'])
def test_qnwbeta_weights_3d(self):
assert_allclose(self.w_beta_3, data['w_beta_3'])
class TestQnwgamm(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_gamm_1, cls.w_gamm_1 = qnwgamma(n, b)
cls.x_gamm_3, cls.w_gamm_3 = qnwgamma(n_3, b_3)
def test_qnwgamm_nodes_1d(self):
assert_allclose(self.x_gamm_1, data['x_gamm_1'])
def test_qnwgamm_nodes_3d(self):
assert_allclose(self.x_gamm_3, data['x_gamm_3'])
def test_qnwgamm_weights_1d(self):
assert_allclose(self.w_gamm_1, data['w_gamm_1'])
def test_qnwgamm_weights_3d(self):
assert_allclose(self.w_gamm_3, data['w_gamm_3'])
| bsd-3-clause |
sanuj/opencog | opencog/python/spatiotemporal/temporal_events/relation_formulas.py | 33 | 19534 | from math import fabs, sqrt, floor
from numpy import convolve, NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY
import numpy
from scipy.stats.distributions import uniform_gen
from spatiotemporal.temporal_events.util import calculate_bounds_of_probability_distribution
from spatiotemporal.temporal_interval_handling import calculateCenterMass
from spatiotemporal.time_intervals import TimeInterval
from utility.functions import FunctionPiecewiseLinear, FunctionHorizontalLinear, integral, FUNCTION_ZERO, almost_equals
DECOMPOSITION_PRECISION = 10 ** 14
__author__ = 'keyvan'
TEMPORAL_RELATIONS = {
'p': 'precedes',
'm': 'meets',
'o': 'overlaps',
'F': 'finished by',
'D': 'contains',
's': 'starts',
'e': 'equals',
'S': 'started by',
'd': 'during',
'f': 'finishes',
'O': 'overlapped by',
'M': 'met by',
'P': 'preceded by'
}
class TemporalRelation(dict):
all_relations = 'pmoFDseSdfOMP'
_type = None
_list = None
_vector = None
@staticmethod
def from_list(list_object):
relation = TemporalRelation()
for i, name in enumerate(TemporalRelation.all_relations):
value = list_object[i]
if not isinstance(value, (int, float)):
value = float(value)
relation[name] = value
return relation
def to_list(self):
if self._list is None:
self._list = []
for name in self.all_relations:
self._list.append(self[name])
return self._list
def to_vector(self):
if self._vector is None:
_list = self.to_list()
self._vector = numpy.array(_list)
return self._vector
@property
def type(self):
if self._type is None:
self._type = ''.join([name for name in TemporalRelation.all_relations if self[name] > 0])
return self._type
def __setitem__(self, relation_name, value):
if relation_name not in TemporalRelation.all_relations:
            raise AttributeError("'{0}' is not a valid Allen relation".format(relation_name))
dict.__setitem__(self, relation_name, floor(value * DECOMPOSITION_PRECISION) / DECOMPOSITION_PRECISION)
def __repr__(self):
return 'TemporalRelation({0})'.format(self.type)
def __str__(self):
return repr(self)
def __hash__(self):
return hash(tuple(self.to_list()))
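# A minimal sketch of how TemporalRelation behaves (the helper name and the
# degree values are illustrative only): one degree is stored per Allen
# relation letter, and ``type`` collects the letters with non-zero degree.
def _temporal_relation_example():
    degrees = [0.0] * len(TemporalRelation.all_relations)
    degrees[TemporalRelation.all_relations.index('m')] = 0.3
    degrees[TemporalRelation.all_relations.index('o')] = 0.7
    relation = TemporalRelation.from_list(degrees)
    assert relation.type == 'mo'  # letters appear in 'pmoFDseSdfOMP' order
    return relation.to_vector()   # the 13 degrees as a numpy array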
class BaseRelationFormula(object):
def __init__(self):
self.bounds = {}
def duration_of(self, dist):
a, b = self.bounds_of(dist)
return fabs(a - b)
def bounds_of(self, dist):
# if dist in self.bounds:
# return self.bounds[dist]
bounds = calculate_bounds_of_probability_distribution(dist)
self.bounds[dist] = bounds
return bounds
def before_point(self, point_1_value, point_2_value):
return 0
def same_point(self, point_1_value, point_2_value):
return 1 - fabs(self.before_point(point_1_value,
point_2_value) - self.after_point(point_1_value, point_2_value))
def after_point(self, point_1_value, point_2_value):
return self.before_point(point_2_value, point_1_value)
def before_integral_bounds(self, dist_1, dist_2):
return calculate_bounds_of_probability_distribution(dist_1)
def same_integral_bounds(self, dist_1, dist_2):
dist_1_a, dist_1_b = calculate_bounds_of_probability_distribution(dist_1)
dist_2_a, dist_2_b = calculate_bounds_of_probability_distribution(dist_2)
return max(dist_1_a, dist_2_a), min(dist_1_b, dist_2_b)
def after_integral_bounds(self, dist_1, dist_2):
return calculate_bounds_of_probability_distribution(dist_2)
def before(self, dist_1, dist_2):
return integral(lambda x: self.before_point(dist_1.pdf(x), dist_2.pdf(x)),
*self.before_integral_bounds(dist_1, dist_2))
def same(self, dist_1, dist_2):
return integral(lambda x: self.same_point(dist_1.pdf(x), dist_2.pdf(x)),
*self.same_integral_bounds(dist_1, dist_2))
def after(self, dist_1, dist_2):
return integral(lambda x: self.after_point(dist_1.pdf(x), dist_2.pdf(x)),
*self.after_integral_bounds(dist_1, dist_2))
def compare(self, dist_1, dist_2):
"""
returns before, same and after
"""
return self.before(dist_1, dist_2), self.same(dist_1, dist_2), self.after(dist_1, dist_2)
class FormulaCreator(object):
def __init__(self, relation_formula):
self.relation_formula = relation_formula
def temporal_relations_between(self, temporal_event_1, temporal_event_2):
dist_1_beginning, dist_1_ending = temporal_event_1.distribution_beginning, temporal_event_1.distribution_ending
dist_2_beginning, dist_2_ending = temporal_event_2.distribution_beginning, temporal_event_2.distribution_ending
self.relation_formula.bounds[dist_1_beginning] = temporal_event_1.a, temporal_event_1.beginning
self.relation_formula.bounds[dist_1_ending] = temporal_event_1.ending, temporal_event_1.b
self.relation_formula.bounds[dist_2_beginning] = temporal_event_2.a, temporal_event_2.beginning
self.relation_formula.bounds[dist_2_ending] = temporal_event_2.ending, temporal_event_2.b
combinations = [
(dist_1_beginning, dist_2_beginning),
(dist_1_beginning, dist_2_ending),
(dist_1_ending, dist_2_beginning),
(dist_1_ending, dist_2_ending)
]
return self.calculate_relations(combinations)
def calculate_relations(self, combinations=None):
"""
Calculates the values of the 13 relations based on the before, same,
and after values of the combinations between the beginning and
ending distributions of the two intervals obtained, e.g. from
the DecompositionFitter.
:param combinations: the 4 combinations between beginning and ending
distribution
:return: a dictionary containing the 13 relations as keys and their
degrees as values
"""
if combinations is None:
combinations = self.relation_formula.combinations
dist_1_beginning, dist_2_beginning = combinations[0]
dist_1_ending, dist_2_ending = combinations[3]
before = {}
same = {}
after = {}
# iterates over the 4 combinations between beginning and ending
for key in combinations:
before[key], same[key], after[key] = self.relation_formula.compare(*key)
result = TemporalRelation()
result['p'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
before[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['m'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
same[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['o'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['F'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
result['D'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['s'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['e'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
result['S'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['d'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['f'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
result['O'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['M'] = after[dist_1_beginning, dist_2_beginning] * same[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['P'] = after[dist_1_beginning, dist_2_beginning] * after[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
return result
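# A minimal sketch of the intended wiring of FormulaCreator (it mirrors the
# __main__ block at the bottom of this file; the helper name and the uniform()
# bounds are illustrative only, and RelationFormulaConvolution is defined just
# below):
def _formula_creator_example():
    from scipy.stats import uniform
    from spatiotemporal.temporal_events import TemporalEvent
    event_1 = TemporalEvent(uniform(loc=3, scale=2), uniform(loc=7, scale=9))
    event_2 = TemporalEvent(uniform(loc=0, scale=10), uniform(loc=13, scale=2))
    creator = FormulaCreator(RelationFormulaConvolution())
    # a TemporalRelation mapping each of the 13 Allen relations to a degree
    return creator.temporal_relations_between(event_1, event_2)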
class RelationFormulaConvolution(BaseRelationFormula):
def function_convolution_uniform(self, bounds_1, bounds_2, probability=None):
a1, b1 = bounds_1
a2, b2 = bounds_2
length_1 = fabs(a1 - b1)
length_2 = fabs(a2 - b2)
convolution_bounds_a, convolution_bounds_b = a1 - b2, b1 - a2
trapezium_0, trapezium_1 = convolution_bounds_a, convolution_bounds_a + min(length_2, length_1)
trapezium_2, trapezium_3 = trapezium_1 + fabs(length_1 - length_2), convolution_bounds_b
#assert trapezium_2 + min(length_2, length_1) == trapezium_3
if probability is None:
probability = min(1 / length_1, 1 / length_2)
result = FunctionPiecewiseLinear(
{trapezium_0: 0, trapezium_1: probability, trapezium_2: probability, trapezium_3: 0},
FUNCTION_ZERO)
result.is_normalised = True
return result
def function_convolution(self, dist_1, dist_2, bins=50):
a_1, b_1, a_2, b_2 = 0, 0, 0, 0
if dist_1 in self.bounds:
a_1, b_1 = self.bounds[dist_1]
else:
a_1, b_1 = calculate_bounds_of_probability_distribution(dist_1)
self.bounds[dist_1] = a_1, b_1
if dist_2 in self.bounds:
a_2, b_2 = self.bounds[dist_2]
else:
a_2, b_2 = calculate_bounds_of_probability_distribution(dist_2)
self.bounds[dist_2] = a_2, b_2
if (type(dist_1.dist), type(dist_2.dist)) == (uniform_gen, uniform_gen):
return self.function_convolution_uniform((a_1, b_1), (a_2, b_2))
convolution_bounds_a, convolution_bounds_b = min(a_1, a_2), max(b_1, b_2)
delta = fabs(convolution_bounds_a - convolution_bounds_b) / bins
convolution_interval = TimeInterval(convolution_bounds_a, convolution_bounds_b, bins)
x = [dist_1.pdf(t) for t in convolution_interval]
y = [dist_2.pdf(t) for t in reversed(convolution_interval)]
c = convolve(x, y)
dictionary_convolution = {}
for t in xrange(len(c)):
dictionary_convolution[delta * t] = c[t]
bias = calculateCenterMass(dictionary_convolution)[0] + dist_2.mean() - dist_1.mean()
dictionary_convolution_biased = {}
for t in dictionary_convolution:
dictionary_convolution_biased[t - bias] = dictionary_convolution[t]
convolution_function = FunctionPiecewiseLinear(dictionary_convolution_biased, FunctionHorizontalLinear(0))
return convolution_function.normalised()
def calculate_similarity(self, dist_1, dist_2):
if (type(dist_1.dist), type(dist_2.dist)) == (uniform_gen, uniform_gen):
length_dist_1 = self.duration_of(dist_1)
length_dist_2 = self.duration_of(dist_2)
return min(length_dist_1, length_dist_2) / sqrt(length_dist_1 * length_dist_2)
dist_1_mean, dist_2_mean = dist_1.mean(), dist_2.mean()
dist_1_transformed = lambda t: dist_1.pdf(t + dist_1_mean)
dist_2_transformed = lambda t: dist_2.pdf(t + dist_2_mean)
geometric_mean = lambda t: sqrt(dist_1_transformed(t) * dist_2_transformed(t))
return integral(geometric_mean, NEGATIVE_INFINITY, POSITIVE_INFINITY)
def compare(self, dist_1, dist_2):
convolution = self.function_convolution(dist_1, dist_2)
before = integral(convolution, NEGATIVE_INFINITY, 0)
after = integral(convolution, 0, POSITIVE_INFINITY)
similarity = self.calculate_similarity(dist_1, dist_2)
correlation = 1 - fabs(before - after)
same = similarity * correlation
return before, same, after
class RelationFormulaGeometricMean(BaseRelationFormula):
def compare(self, dist_1, dist_2):
dist_1_interval = TimeInterval(*self.bounds_of(dist_1))
dist_2_interval = TimeInterval(*self.bounds_of(dist_2))
dictionary_input_output = {}
for time_step in dist_1_interval + dist_2_interval:
dictionary_input_output[time_step] = sqrt(dist_1.pdf(time_step) * dist_2.pdf(time_step))
geometric_mean = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
same = integral(geometric_mean, NEGATIVE_INFINITY, POSITIVE_INFINITY)
dist_1_mean, dist_1_skewness, dist_1_kurtosis = dist_1.stats(moments='msk')
dist_1_standard_deviation = dist_1.std()
dist_2_mean, dist_2_skewness, dist_2_kurtosis = dist_2.stats(moments='msk')
dist_2_standard_deviation = dist_2.std()
distance = fabs(dist_1_standard_deviation - dist_2_standard_deviation) + fabs(dist_1_skewness - dist_2_skewness)
distance += fabs(dist_1_kurtosis - dist_2_kurtosis)
delta = dist_1_mean - dist_2_mean
non_same_portion = 1.0 - same
portion_after, portion_before = 1.0, 0.0
if almost_equals(distance, 0):
if delta < 0:
portion_after, portion_before = 0.0, 1.0
else:
dist_1_standardized_pdf = lambda x: dist_1.pdf(dist_1_standard_deviation * x + dist_1_mean)
dist_2_standardized_pdf = lambda x: dist_2.pdf(dist_2_standard_deviation * x + dist_2_mean)
geometric_mean = lambda t: sqrt(dist_1_standardized_pdf(t) * dist_2_standardized_pdf(t))
geometric_mean_scaled = lambda p: geometric_mean(p / distance)
geometric_mean_scaled_length = max(self.duration_of(dist_1), self.duration_of(dist_2))
dictionary_input_output = {}
for time_step in TimeInterval(-geometric_mean_scaled_length / 2.0, geometric_mean_scaled_length / 2.0):
dictionary_input_output[time_step] = geometric_mean_scaled(time_step)
geometric_mean_scaled = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
portion_after = integral(geometric_mean_scaled, NEGATIVE_INFINITY, delta)
portion_before = integral(geometric_mean_scaled, delta, POSITIVE_INFINITY)
after = portion_after / (portion_after + portion_before) * non_same_portion
return 1.0 - same - after, same, after
if __name__ == '__main__':
import matplotlib.pyplot as plt
from scipy.stats import norm, uniform, expon
from spatiotemporal.temporal_events import TemporalEvent, TemporalEventPiecewiseLinear
import matplotlib.pyplot as plt
figure_number = 1
for event_1, event_2 in [
(
TemporalEvent(uniform(loc=3, scale=2), uniform(loc=7, scale=9)),
TemporalEvent(uniform(loc=0, scale=10), uniform(loc=13, scale=2))
),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=3, scale=2)),
# TemporalEvent(uniform(loc=3, scale=2), uniform(loc=6, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=8, scale=5), uniform(loc=15, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=3, scale=2), uniform(loc=13, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=7), uniform(loc=8, scale=7)),
# TemporalEvent(uniform(loc=4, scale=1), uniform(loc=11, scale=2)),
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=0, scale=11), uniform(loc=13, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=8), uniform(loc=6, scale=8)),
# TemporalEvent(uniform(loc=0, scale=22), uniform(loc=13, scale=8))
# ),
#
# (
# TemporalEvent(uniform(loc=2, scale=2), uniform(loc=7, scale=2)),
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=2), uniform(loc=4, scale=2)),
# TemporalEvent(uniform(loc=6, scale=2), uniform(loc=9, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=3), uniform(loc=15, scale=2)),
# TemporalEvent(uniform(loc=5, scale=2), uniform(loc=9, scale=3))
# ),
#
# (
# TemporalEvent(uniform(loc=5, scale=3), uniform(loc=9, scale=2)),
# TemporalEvent(uniform(loc=1, scale=2), uniform(loc=15, scale=3))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=10, scale=2)),
# TemporalEvent(uniform(loc=15, scale=2), uniform(loc=25, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=15, scale=2), uniform(loc=25, scale=2)),
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=10, scale=2))
# ),
#
# (
# TemporalEvent(norm(loc=1, scale=4.5), expon(loc=30, scale=2)),
# TemporalEvent(norm(loc=25, scale=4.5), expon(loc=60, scale=2))
# ),
#
# (
# TemporalEvent(expon(loc=1, scale=4.5), norm(loc=30, scale=2)),
# TemporalEvent(expon(loc=25, scale=4.5), norm(loc=60, scale=2))
# ),
#
# (
# TemporalEventPiecewiseLinear({1: 0, 2: 0.1, 3: 0.3, 4: 0.7, 5: 1}, {6: 1, 7: 0.9, 8: 0.6, 9: 0.1, 10: 0}),
# TemporalEventPiecewiseLinear({7.5: 0, 8.5: 0.1, 9.5: 0.3, 10.5: 0.7, 11.5: 1},
# {13: 1, 14.5: 0.9, 15.3: 0.6, 17: 0.1, 20: 0})
# ),
]:
temporal_relations = event_1 * event_2
print '\nFigure' + str(figure_number)
print '----------------------'
print sum(temporal_relations.values())
for p in 'pmoFDseSdfOMP':
print p, temporal_relations[p]
figure_number += 1
event_1.plot(show_distributions=True).ylim(ymin=-0.1, ymax=1.1)
event_2.plot(show_distributions=True).figure()
plt.show()
| agpl-3.0 |
liupfskygre/qiime | qiime/colors.py | 15 | 24391 | #!/usr/bin/env python
# file colors.py
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME Project" # consider project name
# remember to add yourself
__credits__ = ["Rob Knight", "Jesse Stombaugh", "Yoshiki Vazquez-Baeza"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
"""Code for coloring series based on prefs file.
"""
from colorsys import rgb_to_hsv, hsv_to_rgb
from parse import parse_mapping_file, group_by_field, parse_taxa_summary_table
from numpy import array
from math import floor
import os
import re
from qiime.util import MissingFileError
from qiime.sort import natsort
def string_to_rgb(s):
"""Converts hex string to RGB"""
orig_s = s
s = s.strip()
if s.startswith('#'):
s = s[1:]
if not len(s) == 6:
raise ValueError("String %s doesn't look like a hex string" % orig_s)
return int(s[:2], 16), int(s[2:4], 16), int(s[4:], 16)
def rgb_tuple_to_hsv(rgb):
"""Converts rgb tuple to hsv on Mage's scale"""
rgb_0_to_1 = array(rgb) / 255.0
hsv = rgb_to_hsv(*tuple(rgb_0_to_1))
return hsv[0] * 360, hsv[1] * 100, hsv[2] * 100
def mage_hsv_tuple_to_rgb(hsv):
"""Converts hsv tuple on Mage scale to rgb on 0-255 scale"""
hsv_0_to_1 = hsv[0] / 360.0, hsv[1] / 100.0, hsv[2] / 100.0
rgb = hsv_to_rgb(*tuple(hsv_0_to_1))
return int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)
class Color(object):
"""Stores a color object: name, HSV, ability to write as HTML or Mage.
Note: the reason we store as HSV, not RGB, is that you frequently want
to do gradient colors by hue going from e.g. white to blue, white to red,
etc. Unfortunately, in RGB, you can't specify _which_ white you have
in e.g. #FFFFFF, whereas to get the right gradient you need to be able
to specify that you want (0,0,100) or (180,0,100) or whatever. Hence
the colorspace gymnastics.
"""
def __init__(self, name, coords, colorspace='rgb'):
"""Returns new Color object. Init with name and coords as (R,G,B).
Can also initialize with coords as (H,S,V) or #aabbcc format.
"""
self.Name = name
if isinstance(coords, str): # assume is hex format
self.Coords = rgb_tuple_to_hsv(string_to_rgb(coords))
elif colorspace == 'rgb':
self.Coords = rgb_tuple_to_hsv(tuple(coords))
elif colorspace == 'hsv':
self.Coords = tuple(coords)
else:
raise ValueError(
"Unknown colorspace %s: valid values are rgb, hsv" %
colorspace)
def toRGB(self):
"""Returns self as r, g, b tuple."""
return mage_hsv_tuple_to_rgb(self.Coords)
def toMage(self):
"""Returns self as Mage/KiNG-format string"""
h, s, v = self.Coords
return '@hsvcolor {%s} %3.1f %3.1f %3.1f' % (self.Name, h, s, v)
def toHex(self):
"""Returns self as hex string."""
rgb = self.toRGB()
return ('#%02s%02s%02s' % (hex(rgb[0])[2:], hex(rgb[1])[2:],
hex(rgb[2])[2:])).replace(' ', '0')
def toInt(self):
"""Returns self as hex string."""
rgb = self.toHex()[1:]
return int(float.fromhex(rgb))
def __str__(self):
"""Return string representation of self"""
return str(self.Name) + ':' + self.toHex()
def color_dict_to_objects(d, colorspace='hsv'):
"""Converts color dict to dict of Color objects"""
result = {}
for k, v in d.items():
result[k] = Color(k, v, colorspace)
return result
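# A minimal round-trip sketch for the helpers above (the helper name and the
# color values are illustrative only):
def _color_example():
    c = Color('example_red', '#ff0000')   # hex string input
    assert c.toRGB() == (255, 0, 0)
    assert c.toHex() == '#ff0000'
    # dict of name -> (H, S, V) tuples becomes a dict of name -> Color objects
    return color_dict_to_objects({'red1': (0, 100, 100)})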
# Note: these are all in Mage HSV colorspace
'''
These are the old colors
data_color_hsv = {
'aqua': (180, 100, 100),
'blue': (240,100,100),
'fuchsia': (300,100,100),
'gray': (300,0,50.2),
'green': (120,100,50.2),
'lime': (120,100,100),
'maroon': (0,100,50.2),
'olive': (60,100,50.2),
'purple': (300,100,50.2),
'red': (0,100,100),
'silver': (0, 0, 75.3),
'teal': (180,100,50.2),
'yellow': (60,100,100)
}
This is the old order
data_color_order = ['blue','lime','red','aqua','fuchsia','yellow','green', \
'maroon','teal','purple','olive','silver','gray']
'''
data_color_hsv = {
#'black1': (0,0,20),
'red1': (0, 100, 100),
'blue1': (240, 100, 100),
'orange1': (28, 98, 95),
'green1': (120, 100, 50.2),
'purple1': (302, 73, 57),
'yellow1': (60, 100, 100),
'cyan1': (184, 49, 96),
'pink1': (333, 37, 96),
'teal1': (178, 42, 63),
'brown1': (36, 89, 42),
'gray1': (0, 0, 50.2),
'lime': (123, 99, 96),
'red2': (14, 51, 97),
'blue2': (211, 42, 85),
'orange2': (32, 46, 99),
'green2': (142, 36, 79),
'purple2': (269, 29, 75),
'yellow2': (56, 40, 100),
#'black2': (303,100,24),
'gray2': (0, 0, 75.3),
#'teal2': (192,100,24),
'red3': (325, 100, 93),
'blue3': (197, 100, 100),
#'purple3': (271,43,36),
'brown2': (33, 45, 77),
'green3': (60, 100, 50.2),
'purple4': (264, 75, 100),
#'yellow3': (60,66,75),
#'blue4': (213,45,77),
'red4': (348, 31, 74),
'teal3': (180, 100, 50.2),
#'brown3': (60,100,28),
'red5': (0, 100, 50.2),
'green4': (81, 100, 26),
#'purple5': (240,100,41),
'orange3': (26, 100, 65)
#'brown4': (25,100,20),
#'red6': (17,100,63),
#'purple6':(272,100,44)
}
data_color_order = ['red1', 'blue1', 'orange1', 'green1', 'purple1', 'yellow1',
'cyan1', 'pink1', 'teal1', 'brown1', 'gray1', 'lime', 'red2', 'blue2',
'orange2', 'green2', 'purple2', 'yellow2', 'gray2', 'red3',
'blue3', 'brown2', 'green3', 'purple4',
'red4', 'teal3', 'red5', 'green4', 'orange3']
data_colors = color_dict_to_objects(data_color_hsv)
kinemage_colors = [
'hotpink',
'blue',
'lime',
'gold',
'red',
'sea',
'purple',
'green']
def iter_color_groups(mapping, prefs):
"""Iterates over color groups for each category given mapping file/prefs.
See get_group_colors for details of algorithm.
"""
# Iterate through prefs and color by given mapping labels
for key in natsort(prefs.keys()):
col_name = prefs[key]['column']
if 'colors' in prefs[key]:
if isinstance(prefs[key]['colors'], dict):
colors = prefs[key]['colors'].copy() # copy so we can mutate
else:
colors = prefs[key]['colors'][:]
else:
colors = {}
labelname = prefs[key]['column']
# Define groups and associate appropriate colors to each group
groups = group_by_field(mapping, col_name)
colors, data_colors, data_color_order = \
get_group_colors(groups, colors)
yield labelname, groups, colors, data_colors, data_color_order
def get_group_colors(groups, colors, data_colors=data_colors,
data_color_order=data_color_order):
"""Figures out group colors for a specific series based on prefs.
Algorithm is as follows:
- For each name, color pair we know about:
- Check if the name is one of the groups (exact match)
- If it isn't, assume it's a prefix and pull out all the matching groups
- If the color is just a string, set everything to the color with that
name
- Otherwise, assume that either it's a new color we're adding, or that
it's a range for gradient coloring.
- If it's a new color, create it and add it to added_data_colors.
- If it's a gradient, make up all the new colors and add them to
added_data_colors
The current method for gradient coloring of columns (should perhaps
replace with more general method) is to pass in any of the following:
'colors':(('white', (0,0,100)),('red',(0,100,100)))
makes gradient between white and red, applies to all samples
'colors':{'RK':(('white',(0,0,100)),('red',(0,100,100))),
'NF':(('white',(120,0,100)),('green',(120,100,100)))
}
pulls the combination samples starting with RK, colors with
first gradient, then pulls the combination samples starting
with NF, colors with the next gradient.
Return values are:
- colors: dict of {group_value:color_name}
- data_colors: dict of {color_name:color_object}
- data_color_order: order in which the data colors are used/written.
"""
added_data_colors = {}
if isinstance(colors, dict):
# assume we're getting some of the colors out of a dict
if colors.items() != []:
for k, v in sorted(colors.items()):
if k not in groups: # assume is prefix
k_matches = [g for g in groups if g.startswith(k)]
if isinstance(v, str): # just set everything to this color
for m in k_matches:
colors[m] = v
else: # assume is new color or range
first, second = v
if isinstance(first, str): # new named color?
if first not in data_colors:
added_data_colors[first] = Color(first, second)
for m in k_matches:
colors[m] = first
else: # new color range?
start_color, end_color = map(get_color,
[first, second])
                            start_hsv = start_color.Coords
                            end_hsv = end_color.Coords
                            num_colors = len(k_matches)
curr_data_colors = color_dict_to_objects(
make_color_dict(start_color,
start_hsv, end_color, end_hsv, num_colors))
curr_colors = {}
color_groups(k_matches, curr_colors,
natsort(curr_data_colors))
colors.update(curr_colors)
added_data_colors.update(curr_data_colors)
del colors[k]
elif not isinstance(v, str): # assume val is new color
color = get_color(v)
if color.Name not in data_colors:
added_data_colors[color.Name] = color
colors[k] = color.Name
# handle any leftover groups
color_groups(groups, colors, data_color_order)
# add new colors
data_colors.update(added_data_colors)
if added_data_colors != {}:
data_color_order.append(''.join(natsort(added_data_colors)))
else:
# handle case where no prefs is used
color_groups(groups, colors, data_color_order)
else:
# handle the case where colors is a tuple for gradients
start_color, end_color = map(get_color, colors)
start_hsv = start_color.Coords
end_hsv = end_color.Coords
num_colors = len(groups)
data_colors = color_dict_to_objects(
make_color_dict(start_color, start_hsv, end_color,
end_hsv, num_colors))
data_color_order = list(natsort(data_colors.keys()))
colors = {}
color_groups(groups, colors, data_color_order)
return colors, data_colors, data_color_order
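# A minimal sketch of the prefix behaviour documented above (the helper name,
# the group names and the 'NF'/'RK' prefixes are illustrative only):
def _group_colors_example():
    groups = {'NF_1': ['s1'], 'NF_2': ['s2'], 'RK_1': ['s3']}
    # every group starting with 'NF' gets the named color 'blue1', while
    # 'RK_1' falls through to the default color cycle
    return get_group_colors(groups, {'NF': 'blue1'})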
def get_color(color, data_colors=data_colors):
"""Gets a color by looking up its name or initializing with name+data"""
if isinstance(color, str):
if color in data_colors:
return data_colors[color]
else:
raise ValueError("Color name %s in prefs not recognized" % color)
else:
name, coords = color
if isinstance(coords, str):
colorspace = 'rgb'
else:
colorspace = 'hsv'
return Color(name, coords, colorspace)
def color_groups(groups, colors, data_color_order):
"""Colors a set of groups in data_color_order, handling special colors.
Modifies colors in-place.
Cycles through data colors (i.e. wraps around when last color is reached).
"""
group_num = -1
for g in natsort(groups):
if g not in colors:
group_num += 1
if group_num == len(data_color_order):
group_num = 0
colors[g] = data_color_order[group_num]
def make_color_dict(start_name, start_hsv, end_name, end_hsv, n):
"""Makes dict of color gradient"""
colors = linear_gradient(start_hsv, end_hsv, n)
names = ['%sto%s%s_%s' % (start_name, end_name, n, i) for i in range(n)]
return dict(zip(names, colors))
def combine_map_label_cols(combinecolorby, mapping):
"""Merge two or more mapping columns into one column"""
combinedmapdata = array([''] * len(mapping), dtype='a100')
title = []
match = False
for p in range(len(combinecolorby)):
for i in range(len(mapping[0])):
if str(combinecolorby[p]) == str(mapping[0][i]):
match = True
for q in range(len(mapping)):
combinedmapdata[q] = combinedmapdata[q] + mapping[q][i]
break
else:
match = False
if not match:
raise ValueError(
'One of the columns you tried to combine does not exist!')
title.append(combinecolorby[p])
combinedmapdata[0] = '&&'.join(title)
for i in range(len(combinedmapdata)):
mapping[i].append(combinedmapdata[i])
return mapping
def process_colorby(colorby, data, color_prefs=None):
"""Parses the colorby option from the command line.
color_prefs is required if colorby is not passed.
"""
match = False
prefs = {}
mapping = data['map']
colorbydata = []
if colorby is None and color_prefs is None:
# if coloby option are prefs file not given, color by all categories
# in mapping file
colorbydata = mapping[0]
elif colorby and color_prefs:
# if both the colorby option and prefs file are given, use the categories
# from the colorby option with their appropriate colors in the prefs
# file
prefs_colorby = [color_prefs[i]['column'] for i in color_prefs]
cmd_colorby = colorby.strip().strip("'").split(',')
for i in range(len(cmd_colorby)):
for j in range(len(prefs_colorby)):
if cmd_colorby[i] == prefs_colorby[j]:
colorbydata.append(prefs_colorby[j])
match = True
break
else:
match = False
if not match:
colorbydata.append(cmd_colorby[i])
names = list(colorbydata)
elif colorby:
# if only the colorby option is passed
colorbydata = colorby.strip().strip("'").split(',')
else:
# if only the prefs file is passed
colorbydata = [color_prefs[i]['column'] for i in color_prefs]
names = list(color_prefs)
match = False
for j, col in enumerate(colorbydata):
key = str(col)
# transfer over old color data if it was present
if '&&' in col:
# Create an array using multiple columns from mapping file
combinecolorby = col.split('&&')
data['map'] = combine_map_label_cols(combinecolorby, mapping)
prefs[key] = {}
prefs[key]['column'] = '&&'.join(combinecolorby)
else:
# Color by only one column in mapping file
prefs[key] = {}
prefs[key]['column'] = col
if color_prefs:
for p in color_prefs:
if 'column' in color_prefs[p] and color_prefs[p]['column'] == col:
if 'colors' in color_prefs[p]:
prefs[key]['colors'] = color_prefs[p]['colors']
else:
prefs[key]['colors'] = (
('white', (0, 0, 100)), ('red', (0, 100, 100)))
match = True
break
else:
match = False
if not match:
prefs[key] = {}
prefs[key]['column'] = col
prefs[key]['colors'] = (
('white', (0, 0, 100)), ('red', (0, 100, 100)))
return prefs, data
def linear_gradient(start, end, nbins, eps=1e-10):
"""Makes linear color gradient from start to end, using nbins.
Returns list of (x, y, z) tuples in current colorspace.
eps is used to prevent the case where start and end are the same.
"""
start = array(start)
end = array(end)
result = []
n_minus_1 = max(float(nbins - 1), eps)
for i in range(nbins):
result.append(
list((start * (n_minus_1 - i) / n_minus_1) + (end * (i / n_minus_1))))
return result
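# For example (illustration only), three bins from white to red in HSV space:
#   linear_gradient((0, 0, 100), (0, 100, 100), 3)
#   -> [[0.0, 0.0, 100.0], [0.0, 50.0, 100.0], [0.0, 100.0, 100.0]]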
# The following functions were not unit_tested, however the parts within
# the functions are unit_tested
def get_map(options, data):
"""Opens and returns mapping data"""
try:
map_f = open(options.map_fname, 'U').readlines()
except (TypeError, IOError):
raise MissingFileError('Mapping file required for this analysis')
data['map'] = parse_mapping_file(map_f)
return data['map']
def map_from_coords(coords):
"""Makes pseudo mapping file from coords.
set data['map'] to result of this if coords file supplied but not map.
TODO: write equivalent function for other inputs, e.g. for rarefaction --
basic principle is that you need data structure that you can extract list
of sample ids from.
"""
    result = [['SampleID', 'Sample']]
    for sample_id in coords[0]:
        result.append([sample_id, 'Sample'])
    return result
def sample_color_prefs_and_map_data_from_options(options):
"""Returns color prefs and mapping data based on options.
Note: opens files as needed. Only returns the info related to metadata
coloring and category maps. If you need additional info, it is necessary
to get that info explicitly (e.g. coord files, rarefaction files, etc.).
For example, you might modify the data dict afterwards to add coords,
rarefaction info, etc. depending on the application.
"""
data = {}
# Open and get mapping data, if none supplied create a pseudo mapping \
# file
mapping, headers, comments = get_map(options, data)
new_mapping = []
new_mapping.append(headers)
for i in range(len(mapping)):
new_mapping.append(mapping[i])
data['map'] = new_mapping
# need to set some other way from sample ids
# Determine which mapping headers to color by, if none given, color by \
# Sample ID's
try:
colorby = options.colorby
except AttributeError:
colorby = None
if options.prefs_path:
prefs = eval(open(options.prefs_path, 'U').read())
color_prefs, data = process_colorby(colorby, data,
prefs['sample_coloring'])
if 'background_color' in prefs:
background_color = prefs['background_color']
else:
background_color = 'black'
if 'ball_scale' in prefs:
ball_scale = prefs['ball_scale']
else:
ball_scale = 1.0
arrow_colors = {}
if 'arrow_line_color' in prefs:
arrow_colors['line_color'] = prefs['arrow_line_color']
else:
arrow_colors['line_color'] = 'white'
if 'arrow_head_color' in prefs:
arrow_colors['head_color'] = prefs['arrow_head_color']
else:
arrow_colors['head_color'] = 'red'
else:
background_color = 'black'
color_prefs, data = process_colorby(colorby, data, None)
ball_scale = 1.0
arrow_colors = {'line_color': 'white', 'head_color': 'red'}
if options.prefs_path and options.background_color:
background_color = options.background_color
elif options.background_color:
background_color = options.background_color
if background_color == 'black':
label_color = 'white'
else:
label_color = 'black'
return (
color_prefs, data, background_color, label_color, ball_scale, arrow_colors
)
def taxonomy_color_prefs_and_map_data_from_options(options):
"""Returns color prefs and counts data based on options.
counts data is any file in a format that can be parsed by parse_otu_table
"""
data = {}
data['counts'] = {}
taxonomy_levels = []
# need to set some other way from sample ids
# Determine which mapping headers to color by, if none given, color by \
# Sample ID's
taxonomy_count_files = options.counts_fname
for f in taxonomy_count_files:
try:
counts_f = open(f, 'U').readlines()
except (TypeError, IOError):
raise MissingFileError('Counts file required for this analysis')
sample_ids, otu_ids, otu_table = \
parse_taxa_summary_table(counts_f)
data['counts'][f] = (sample_ids, otu_ids, otu_table)
level = max([len(t.split(';')) - 1 for t in otu_ids])
taxonomy_levels.append(str(level))
if options.prefs_path:
prefs = eval(open(options.prefs_path, 'U').read())
color_prefs = taxonomy_process_prefs(taxonomy_levels,
prefs['taxonomy_coloring'])
if 'background_color' in prefs:
background_color = prefs['background_color']
else:
background_color = 'black'
else:
background_color = 'black'
color_prefs = taxonomy_process_prefs(taxonomy_levels, None)
if options.prefs_path and options.background_color:
background_color = options.background_color
elif options.background_color:
background_color = options.background_color
if background_color == 'black':
label_color = 'white'
else:
label_color = 'black'
return color_prefs, data, background_color, label_color
def taxonomy_process_prefs(taxonomy_levels, color_prefs=None):
"""Creates taxonomy prefs dict given specific taxonomy levels.
color_prefs is not required
taxonomy_levels is a list of the level number i.e. Phylum is 2
prefs will include a 'colors' dictionary for each given level
    if there is a corresponding level in color_prefs, that dictionary is
    used for the level, otherwise an empty dict is added
"""
prefs = {}
for j, col in enumerate(taxonomy_levels):
key = str(col)
col = str(col)
# Color by only one level
prefs[key] = {}
prefs[key]['column'] = col
if color_prefs:
for p in color_prefs:
if 'column' in color_prefs[p] and str(color_prefs[p]['column']) == col:
if 'colors' in color_prefs[p]:
prefs[key]['colors'] = color_prefs[p]['colors'].copy()
else:
prefs[key]['colors'] = {}
match = True
break
else:
match = False
if not match:
prefs[key] = {}
prefs[key]['column'] = col
prefs[key]['colors'] = {}
return prefs
def get_qiime_hex_string_color(index):
"""Retrieve an HEX color from the list of QIIME colors
Input:
index: index of the color to retrieve, if the number is greater than the
number of available colors, it will rollover in the list.
Output:
color: string in the format #FF0000
"""
assert index >= 0, "There are no negative indices for the QIIME colors"
n_colors = len(data_color_order)
if index >= n_colors:
index = int(index - floor((index / n_colors) * n_colors))
return data_colors[data_color_order[index]].toHex()
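# For example, get_qiime_hex_string_color(0) returns '#ff0000' (the first
# color in data_color_order, 'red1'); an index past the end of the palette
# wraps around to the beginning of the list.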
def matplotlib_rgb_color(rgb_color):
"""Returns RGB color in matplotlib format.
ex: (255,0,255) will return (1.0,0.0,1.0)
"""
return tuple([i / 255. for i in rgb_color])
| gpl-2.0 |
dmitryduev/pypride | bin/tecs.py | 1 | 20564 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 21 15:18:45 2014
@author: oasis
"""
from pypride.vintlib import *
import os
import multiprocessing as mp
#import matplotlib.pyplot as plt
#import prettyplotlib as ppl
#from time import time as _time
import argparse
'''
#==============================================================================
# Function returning slant tecs (modified ion_igs)
#==============================================================================
'''
def ion_tec(sta, iono, elv, azi, UT, f_0=None):
'''
Calculate ionospheric delay using IGS vertical TEC maps
'''
# calculate distance to point J of ray pierce into ionosphere from site
# H = 438.1*1e3 # m - mean height of the ionosphere above R_E
H = 450*1e3 # m - height of ionosphere above R_E as stated in IONEX files
R_E = 6371.0*1e3 # m - Earth's radius from the TEC map
alpha = 0.9782
lat_geod = sta.lat_geod
lon_gcen = sta.lon_gcen
h_geod = sta.h_geod
UT_tec = iono.UT_tec
fVTEC = iono.fVTEC
#WGS84 Ellipsoid
a = 6378137.0 # m
f = 1.0/298.2572235630
b = a*(1.0-f) # m
ec = sqrt((a**2-b**2)/(a**2))
R_oscul = a*sqrt(1.0-ec**2)/(1.0-(ec*sin(lat_geod))**2) # m
source_vec = np.array([sin(pi/2.0-elv)*cos(azi),\
sin(pi/2.0-elv)*sin(azi),\
cos(pi/2.0-elv) ])
# slanted distance btw the ground and the iono layer
ds = (R_oscul+h_geod)*sin(-elv) + \
0.5 * sqrt( (2.0*(R_oscul+h_geod)*sin(-elv))**2 - \
4.0*((R_oscul+h_geod)**2 - (R_E+H)**2) )
# cart crds of the starting point
rpt = [R_oscul+h_geod, lat_geod, lon_gcen]
r0 = sph2cart(rpt)
# cart crds of the ionospheric pierce point
r1 = r0 + ds*source_vec
# lat/long of the pierce point
rlalo = cart2sph(r1)
lon = 180.0*rlalo[2]/pi
lat = 180.0*rlalo[1]/pi
# find closest epoch in TEC data:
# easy case - UT is in UT_tec
n0 = np.searchsorted(UT_tec, UT)
if UT in UT_tec:
# only need to interpolate TECz to the Lat/Lon of the pierce point
TEC_z = float(fVTEC[n0](lon, lat))
else:
# else take 4 consecutive epochs and interpolate:
N_epoch = len(fVTEC)
if n0==1 or n0==0:
nl = 0; nr = 4
elif n0==N_epoch-2 or n0==N_epoch-1:
nl = N_epoch-4; nr = N_epoch
else:
nl = n0-2; nr = n0+2
TEC_z = []
for nn in range(nl,nr):
TEC_z.append(float(fVTEC[nn](lon, lat)))
# interpolate zenith TECz to the epoch of observation
# fTEC = sp.interpolate.interp1d(UT_tec[nl:nr], TEC_z, kind='linear')
# TEC_z = fTEC(UT)
TEC_z = np.interp(UT, UT_tec[nl:nr], TEC_z)
# calculate slanted TEC
TEC = TEC_z / cos(asin( (R_oscul+h_geod)*sin(alpha*(pi/2.0-elv))/(R_E+H) ))
TEC_tecu = 0.1*TEC # in TEC units
# calculate ionspheric delay for the source
# delay_ion = 5.308018e10*TEC_tecu/(4.0*pi**2*f_0*f_0)
return TEC_tecu
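# The commented-out formula above turns slant TEC into an ionospheric group
# delay; a small standalone sketch of that conversion (the function name and
# the X-band frequency in the usage note are illustrative, not part of
# pypride):
def ion_delay_from_tec(tec_tecu, f_hz):
    """
    Group delay in seconds at frequency f_hz [Hz] for a slant TEC given in
    TEC units (1 TECU = 1e16 electrons/m^2).
    """
    return 5.308018e10 * tec_tecu / (4.0 * pi**2 * f_hz**2)
# e.g. ion_delay_from_tec(10.0, 8.4e9) ~ 1.9e-10 s, i.e. ~6 cm of extra path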
'''
#==============================================================================
# Calculate all sorts of TECs
#==============================================================================
'''
def calctec(ins):
'''
Calculate all sorts of TECs
'''
# parse single-variable input:
record, sta_r, sta_t, sou_type, source, \
ip_tecs, const, inp, tec_uplink = ins
t = datetime.datetime(*map(int, record[4:10])) + \
datetime.timedelta(seconds=record[10]/2.0)
# t_obs - middle of obs run
t_obs = (t.hour*3600.0 + t.minute*60.0 + record[10]/2.0)/86400.0
''' download iono data '''
# doup(False, inp['do_ion_calc'], \
# inp['cat_eop'], inp['meteo_cat'], inp['ion_cat'], \
# t, t, 'igs')
''' load vtec maps '''
try:
if record[34]==30.0 or record[35]==30.0 or record[36]==1.0:
iono = ion(t, t, inp)
''' calculate tec for downlink '''
if record[34]==30.0:
st = sta_r[int(record[3])-1] # receiving station
el = record[18]*pi/180
if record[17]>180:
az = (record[17]-360.0)*pi/180
else:
az = record[17]*pi/180
tec = ion_tec(st, iono, el, az, t_obs)
# print 'TEC down:', record[34], '{:4.1f}'.format(tec)
# replace corresponding field in the record:
record[34] = tec
''' calculate tec for uplink '''
if record[35]==30.0:
            st = sta_t[int(record[33])-1] # transmitting station
# use planetary ephems
mjd = mjuliandate(t.year, t.month, t.day)
UTC = (t.hour + t.minute/60.0 + t.second/3600.0)/24.0
JD = mjd + 2400000.5
TAI, TT = taitime(mjd, UTC)
with open(inp['cat_eop'], 'r') as fc:
fc_lines = fc.readlines()
eops = np.zeros((7,7)) # +/- 3 days
for jj in range(len(fc_lines)):
if fc_lines[jj][0]!=' ' and fc_lines[jj][0]!='*':
entry = [float(x) for x in fc_lines[jj].split()]
if len(entry) > 0 and entry[3] == np.floor(mjd) - 3:
for kk in range(7):
eops[kk,0] = entry[3] # mjd
eops[kk,1] = entry[6] # UT1-UTC
eops[kk,2] = entry[6] - nsec(entry[3]) # UT1 - TAI
eops[kk,3] = entry[4] # x_p
eops[kk,4] = entry[5] # y_p
eops[kk,5] = entry[8] # dX
eops[kk,6] = entry[9] # dY
entry = [float(x) for x in fc_lines[jj+kk+1].split()]
break #exit loop
UT1, eop_int = eop_iers(mjd, UTC, eops)
CT, dTAIdCT = t_eph(JD, UT1, TT, st.lon_gcen, st.u, st.v)
# Earth:
rrd = pleph(JD+CT, 3, 12, inp['jpl_eph'])
earth = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
# Venus/Mars
if source.lower()=='vex':
rrd = pleph(JD+CT, 2, 12, inp['jpl_eph'])
elif source.lower()=='mex':
rrd = pleph(JD+CT, 4, 12, inp['jpl_eph'])
planet = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
lt = np.linalg.norm(planet[:,0]-earth[:,0])/const.C
            dt = 3*datetime.timedelta(seconds=lt) # 3 light-times (signal round trip plus one more LT, see below)
if tec_uplink=='planet':
# 3 LTs ago (2LTs - signal round trip + another LT )
# print dt
# print CT
CT -= dt.total_seconds()/86400.0
_, eop_int = eop_iers(mjd, UTC-2.0*lt/86400.0, eops)
# print CT
## Earth:
rrd = pleph(JD+CT, 3, 12, inp['jpl_eph'])
earth = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
# Earth's acceleration in m/s**2:
v_plus = np.array(pleph(JD+CT+1.0/86400.0, 3, 12, inp['jpl_eph'])[3:])
v_minus = np.array(pleph(JD+CT-1.0/86400.0, 3, 12, inp['jpl_eph'])[3:])
a = (v_plus - v_minus)*1e3 / 2.0
a = np.array(np.matrix(a).T)
earth = np.hstack((earth, a))
## Sun:
rrd = pleph(JD+CT, 11, 12, inp['jpl_eph'])
sun = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
## Moon:
rrd = pleph(JD+CT, 10, 12, inp['jpl_eph'])
moon = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
# Venus/Mars
if source.lower()=='vex':
rrd = pleph(JD+CT, 2, 12, inp['jpl_eph'])
elif source.lower()=='mex':
rrd = pleph(JD+CT, 4, 12, inp['jpl_eph'])
planet = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
r2000 = ter2cel(t-dt, eop_int, dTAIdCT, 'iau2000')
st = dehanttideinel(st, t-dt, earth, sun, moon, r2000)
st = hardisp(st, t-dt, r2000)
st = poletide(st, t-dt, eop_int, r2000)
st.j2000gp(r2000)
r = planet[:,0] - (st.r_GCRS + earth[:,0])
                ra = np.arctan2(r[1],r[0]) # right ascension
dec = np.arctan(r[2]/np.sqrt(r[0]**2+r[1]**2)) # declination
if ra < 0: ra += 2.0*np.pi
K_s = np.array([cos(dec)*cos(ra), \
cos(dec)*sin(ra), \
sin(dec)])
az, el = aber_source(st.v_GCRS, st.vw, K_s, r2000, earth)
# print map(lambda x:x*180/pi, (az,el))
# print t_obs, 2.0*lt/86400.0
tec = ion_tec(st, iono, el, az, (t_obs - 2.0*lt/86400.0))
# print t, tec
elif tec_uplink=='sc':
# load s/c ephemeris:
# tic = _time()
eph = load_sc_eph(sou_type, source, t, t, inp)
# print 'loading eph took', _time()-tic, ' s'
# lt to station in seconds at t_obs
lt, _, _ = st.LT_radec_bc(eph.bcrs[0], eph.CT, JD, UTC, inp['jpl_eph'])
# az/el 2LT ago (another LT is accounted for internally)
r2000 = ter2cel(t, eop_int, dTAIdCT, 'iau2000')
az, el = st.AzEl2(eph.gtrs, eph.UT, JD, \
UTC - 2.0*lt/86400.0, inp['jpl_eph'])
# print map(lambda x:x*180/pi, (az,el))
# print t_obs, 2.0*lt/86400.0
tec = ion_tec(st, iono, el, az, (t_obs - 2.0*lt/86400.0))
# print t, tec
# print 'TEC up:', record[35], '{:4.1f}'.format(tec)
# replace corresponding field in the record:
record[35] = tec
''' calculate IP tec (intepolate from table) '''
if record[36]==1.0:
orb_phase = record[22] + record[23]
ip_tec, _ = lagint(4, ip_tecs[:,0], ip_tecs[:,3], orb_phase)
record[36] = ip_tec*(1+880.0/749.0)
except Exception, err:
print err
print 'error occured for {:4d}/{:02d}/{:02d}'\
.format(t.year, t.month, t.day)
finally:
return record
'''
#==============================================================================
# Run pipeline
#==============================================================================
'''
if __name__ == '__main__':
'''
This script is supposed to be run from the command line
'''
# create parser
parser = argparse.ArgumentParser()
# optional arguments
parser.add_argument('-s', '--spacecraft', type=str, choices=['vex', 'mex'],
help='spacecraft')
parser.add_argument('-u', '--uppoint', type=str, choices=['sc', 'planet'],
default='planet',
help='where to point when calculating TEC on uplink:'+\
' \'planet\' to point at the S/C host planet (default),'+\
' \'sc\' to point at the S/C')
parser.add_argument('-i', '--ionomodel', type=str, choices=['igs', 'igr'],
default='igr',
help='IGS\' ionospheric TEC model to use: final or '+\
'rapid (default)')
parser.add_argument('-p', '--parallel', action='store_true',
help='run computation in parallel mode')
# positional argument
parser.add_argument('inpFile', type=str,
help="input ScintObsSummary table")
args = parser.parse_args()
scint_table_file = args.inpFile
# which spacecraft?
if args.spacecraft=='vex':
source = 'vex'
elif args.spacecraft=='mex':
source = 'mex'
else:
# try to guess:
if 'vex' in scint_table_file.lower():
source = 'vex'
elif 'mex' in scint_table_file.lower():
source = 'mex'
else:
raise Exception('Spacecraft not set; failed to guess.')
# where to point uplink?
if args.uppoint=='sc':
tec_uplink = 'sc'
elif args.uppoint=='planet':
tec_uplink = 'planet'
else:
# let's do it quickly by default
tec_uplink = 'planet'
# proceed with pipelining:
inp = inp_set('inp.cfg')
inp.iono_model = args.ionomodel
inp = inp.get_section('all')
const = constants()
receivers = ['METSAHOV', 'MEDICINA', 'MATERA', 'NOTO', 'WETTZELL', \
'YEBES40M', 'PUSHCHIN', 'ONSALA60', 'HARTRAO', \
'SESHAN25', 'KUNMING', 'URUMQI', 'HART15M', \
'SVETLOE', 'ZELENCHK', 'BADARY', 'TIANMA65', 'WARK12M',\
'HOBART26', 'HOBART12', 'YARRA12M', 'KATH12M',\
'WARK30M', 'WETTZ13S', 'WETTZ13N']
transmitters = ['NWNORCIA', 'CEBREROS', 'MALARGUE', 'TIDBIN64', 'DSS35',\
'DSS45', 'DSS34', 'DSS65', 'DSS63', 'DSS14', 'DSS15']
recv_short = shname(receivers, inp['shnames_cat'], inp['shnames_cat_igs'])
tran_short = shname(transmitters, inp['shnames_cat'], inp['shnames_cat_igs'])
''' load cats '''
# last argument is dummy
sou_type = 'S'
_, sta_r, _ = load_cats(inp, source, sou_type, receivers, \
datetime.datetime.now())
_, sta_t, _ = load_cats(inp, source, sou_type, transmitters, \
datetime.datetime.now())
''' calculate site positions in geodetic coordinate frame
+ transformation matrix VW from VEN to the Earth-fixed coordinate frame '''
for st_r in sta_r:
st_r.geodetic(const)
for st_t in sta_t:
st_t.geodetic(const)
''' load IP TEC table '''
if source == 'vex':
f_iptec = 'TecVenus.NFS.txt'
if source == 'mex':
f_iptec = 'TecMars.NFS.txt'
with open(os.path.join(inp['ion_cat'], 'scint', f_iptec)) as fipt:
ip_tecs = np.array([map(float,x.split()) for x in fipt.readlines() \
if x[0]!='#'])
''' Parse Scint Table '''
# with open(os.path.join(inp['ion_cat'], 'scint', scint_table_file)) as f:
with open(scint_table_file) as f:
f_lines = f.readlines()
scintTable = []
f_lines_clean = [l for l in f_lines if l[0]=='|' and len(l)>20]
for line in f_lines_clean:
line_parsed = map(float, line.replace('|',' ').split())
scintTable.append(line_parsed)
scintTable = np.array(scintTable)
''' precompute ephemerides for faster access '''
# get unique dates in the scintTable
dates = np.array([datetime.datetime(*map(int, x[4:7])) for x in scintTable])
dates = np.unique(dates)
# planet position could be used instead!
if tec_uplink == 'sc':
# now get "min" and "max" epochs on that day
startStopTimes = []
for date in dates:
span = [x[4:10] for x in scintTable \
if datetime.datetime(*map(int,x[4:7])) == date]
mjds = []
for spa in span:
mjds.append(mjuliandate(*spa))
mjd_min = np.argmin(mjds) # "min" epoch
mjd_max = np.argmax(mjds) # "max" epoch
start = datetime.datetime(*map(int, span[mjd_min]))
stop = datetime.datetime(*map(int, span[mjd_max]))
startStopTimes.append([start, stop])
# print date, start, stop
# create one ephemeris spanning across min-max
# (not to recalculate it at each step)
print 'Creating ephemerides...'
# do it in a parallel way!
def ephmakerWrapper(args):
''' wrapper func to unpack arguments '''
return load_sc_eph(*args)
# check raw eph files boundaries:
path = os.path.join(inp['sc_eph_cat'], 'raw_'+source.lower())
checkbound(source, orb_path=path)
## make single-var inputs:
#ins = []
#for (start, stop) in startStopTimes:
# ins.append((sou_type, source, start, stop, inp))
## number of cores available
#n_cpu = mp.cpu_count()
## create pool
#pool = mp.Pool(n_cpu)
    ## asynchronously apply calctec to each of ins
#pool.map_async(ephmakerWrapper, ins)
## close bassejn
#pool.close() # we are not adding any more processes
#pool.join() # wait until all threads are done before going on
## do it in serial way!
for ii, (start, stop) in enumerate(startStopTimes):
print len(startStopTimes)-ii, 'ephemerides to go'
load_sc_eph(sou_type, source, start, stop, inp, load=False)
''' download iono data if necessary '''
print 'Fetching ionospheric data...'
#tec_model = 'igs' # final 'igs' or rapid 'igr' IGS solution
tec_model = inp['iono_model']
for t in dates:
doup(False, inp['do_ion_calc'], \
inp['cat_eop'], inp['meteo_cat'], inp['ion_cat'], \
t, t, tec_model)
''' iterate over records in scint table '''
print 'Computing TECs...'
# make single-var inputs:
ins = []
for record in scintTable:
ins.append((record, sta_r, sta_t, sou_type, source, \
ip_tecs, const, inp, tec_uplink))
## parallel way:
if args.parallel:
# number of cores available
n_cpu = mp.cpu_count()
# create pool
pool = mp.Pool(n_cpu)
        # asynchronously apply calctec to each of ins
result = pool.map_async(calctec, ins)
# close bassejn
pool.close() # we are not adding any more processes
pool.join() # wait until all threads are done before going on
# get the ordered results back into obsz
outs = result.get()
scintTableOut = []
for ou in outs:
scintTableOut.append(ou)
## serial way:
else:
scintTableOut = []
for ii, record in enumerate(ins):
print len(scintTable)-ii, 'records to go'
scintTableOut.append(calctec(record))
#%%
'''
#==========================================================================
# Create output table
#==========================================================================
'''
print 'Outputting...'
# date string
#date_string = datetime.datetime.now().strftime("%y%m%d")
#out_table = ''.join(('ScintTable.', date_string))
# put in the vispy output folder
if '/' in scint_table_file:
slash = [i for i,x in enumerate(scint_table_file) if x=='/']
out = scint_table_file[slash[-1]+1:]
else:
out = scint_table_file
out_table = ''.join((out[:-4], 'i', out[-4:]))
first_entry = [i for i, x in enumerate(f_lines) if x[0]=='|'][0]
header = [x for x in f_lines[:first_entry] if x[0]=='/']
last_entry = [i for i, x in enumerate(f_lines) if x[0]=='|'][-1]
footer = [x for x in f_lines[last_entry:] if x[0]=='/']
with open(os.path.join(inp['out_path'], out_table), 'w') as f:
# print header:
for line in header:
f.write(''.join((line.strip(), '\n')))
# print table:
for ii, record in enumerate(scintTableOut):
line = '|{:5d}'.format(ii+1)
line += '{:4d}{:4d}{:4d}{:8d} {:02d} {:02d} {:02d} {:02d} {:02d}'\
.format(*map(int, record[1:10]) )
line += '{:6d}{:4d} {:02d}'\
.format(*map(int, record[10:13]))
line += ' {:04.1f}'\
.format(float(record[13]))
line += '{:4d} {:02d}'\
.format(*map(int,record[14:16]))
line += ' {:04.1f}'\
.format(float(record[16]))
line += '{:6.1f}{:6.1f}{:6.1f}{:6.1f}{:8.4f}{:7.2f}{:7.2f} |'\
.format(*map(float, record[17:24]))
line += '{:8.3f}{:7.3f}{:8.3f}{:6.3f}{:10.2f}{:7.2f}{:8.2f}'\
.format(*map(float, record[24:31]))
line += '{:8d} |{:3d}{:7d}'\
.format(*map(int, record[31:34]))
line += '{:6.1f}{:8.1f}{:9.1f} |\n'\
.format(*map(float, record[34:]))
f.write(line)
# print footer:
for line in footer:
f.write(''.join((line.strip(), '\n'))) | gpl-2.0 |
mxjl620/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
weissercn/MLTools | Dalitz_simplified/classifier_eval_simplified.py | 1 | 17436 |
#adapted from the example at http://scikit-learn.org/stable/auto_examples/svm/plot_rbf_parameters.html
"""
This script can be used to get the p value for classifiers. It takes input files with column vectors corresponding to features and labels.
Then there are two different routes one can go down. When mode has a value of 1, then a grid search will be performed on
one set of input files. If it is 2, then the hyperparameter search is performed by spearmint. When the mode is turned off (0),
then the p value is computed for multiple sets of input files and the p value distribution is plotted. One sets all the variables
including the classifier in the "args" list. The classifier provided is ignored if keras_mode is on (1) in which case a keras neural
network is used.
"""
from __future__ import print_function
print(__doc__)
import os
import p_value_scoring_object
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn import cross_validation
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
from keras.utils import np_utils
from scipy import stats
import math
##############################################################################
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
def Xy_to_keras_Xy(X,y):
print("X.shape : ",X.shape)
keras_X = X
keras_y = np_utils.to_categorical(y, 2)
return (keras_X, keras_y)
def make_keras_model(n_hidden_layers, dimof_middle, dimof_input):
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.utils import np_utils, generic_utils
from keras.wrappers.scikit_learn import KerasClassifier
dimof_output =2
print("dimof_input : ",dimof_input, "dimof_output : ", dimof_output)
batch_size = 1
dropout = 0.5
countof_epoch = 5
model = Sequential()
model.add(Dense(input_dim=dimof_input, output_dim=dimof_middle, init="glorot_uniform",activation='tanh'))
model.add(Dropout(dropout))
for n in range(n_hidden_layers):
model.add(Dense(input_dim=dimof_middle, output_dim=dimof_middle, init="glorot_uniform",activation='tanh'))
model.add(Dropout(dropout))
model.add(Dense(input_dim=dimof_middle, output_dim=dimof_output, init="glorot_uniform",activation='sigmoid'))
#Compiling (might take longer)
model.compile(loss='categorical_crossentropy', optimizer='sgd')
return model
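# Hedged usage sketch (not exercised in this script): the KerasClassifier wrapper
# imported inside make_keras_model could expose the network to the scikit-learn API,
# with the extra keyword arguments forwarded to the build function, e.g.
#
#     clf = KerasClassifier(build_fn=make_keras_model, n_hidden_layers=2,
#                           dimof_middle=100, dimof_input=64)
#
# such a clf could then be passed in args[6] like any other sklearn-style classifier.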
class Counter(object):
# Creating a counter object to be able to perform cross validation with only one split
def __init__(self, list1,list2):
self.current = 1
self.list1 =list1
self.list2 =list2
def __iter__(self):
'Returns itself as an iterator object'
return self
def __next__(self):
'Returns the next value till current is lower than high'
if self.current > 1:
raise StopIteration
else:
self.current += 1
return self.list1,self.list2
next = __next__ #python2
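# Hedged usage sketch (indices illustrative only): Counter imitates a one-split CV
# iterator, so cross_val_score / GridSearchCV see exactly one train/test fold:
#
#     acv = Counter(range(0, 800), range(800, 1000))
#     for train_index, test_index in acv:
#         pass          # body runs once; the second next() raises StopIteration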
def histo_plot_pvalue(U_0,abins,axlabel,aylabel,atitle,aname):
bins_probability=np.histogram(U_0,bins=abins)[1]
#Finding the p values corresponding to 1,2 and 3 sigma significance.
no_one_std_dev=sum(i < (1-0.6827) for i in U_0)
no_two_std_dev=sum(i < (1-0.9545) for i in U_0)
no_three_std_dev=sum(i < (1-0.9973) for i in U_0)
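    # i.e. counts of p-values below 0.3173, 0.0455 and 0.0027, the two-sided tail
    # areas outside 1, 2 and 3 Gaussian standard deviations.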
print(no_one_std_dev,no_two_std_dev,no_three_std_dev)
with open(aname+"_p_values_1_2_3_std_dev.txt",'w') as p_value_1_2_3_std_dev_file:
p_value_1_2_3_std_dev_file.write(str(no_one_std_dev)+'\t'+str(no_two_std_dev)+'\t'+str(no_three_std_dev)+'\n')
#plt.rc('text', usetex=True)
textstr = '$1\sigma=%i$\n$2\sigma=%i$\n$3\sigma=%i$'%(no_one_std_dev, no_two_std_dev, no_three_std_dev)
# Making a histogram of the probability predictions of the algorithm.
fig_pred_0= plt.figure()
ax1_pred_0= fig_pred_0.add_subplot(1, 1, 1)
n0, bins0, patches0 = ax1_pred_0.hist(U_0, bins=bins_probability, facecolor='red', alpha=0.5)
ax1_pred_0.set_xlabel(axlabel)
ax1_pred_0.set_ylabel(aylabel)
ax1_pred_0.set_title(atitle)
plt.xlim([0,1])
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
    # place a text box in the upper right corner in axes coords
ax1_pred_0.text(0.85, 0.95, textstr, transform=ax1_pred_0.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
fig_pred_0.savefig(aname+"_p_values_plot.png")
#fig_pred_0.show()
plt.close(fig_pred_0)
def classifier_eval(mode,keras_mode,args):
##############################################################################
# Setting parameters
#
name=args[0]
sample1_name= args[1]
sample2_name= args[2]
shuffling_seed = args[3]
#mode =0 if you want evaluation of a model =1 if grid hyperparameter search =2 if spearmint hyperparameter search
comp_file_list=args[4]
print(comp_file_list)
cv_n_iter = args[5]
clf = args[6]
C_range = args[7]
gamma_range = args[8]
if len(args)>9:
#AD mode =1 : Anderson Darling test used instead of Kolmogorov Smirnov
#AD mode =2 : Visualisation of the decision boundary
#AD mode anything else: use KS and no visualisation
AD_mode = args[9]
else:
AD_mode = 0
if mode==0:
#For standard evaluation
score_list=[]
print("standard evaluation mode")
elif mode==1:
#For grid search
print("grid hyperparameter search mode")
param_grid = dict(gamma=gamma_range, C=C_range)
elif mode==2:
#For spearmint hyperparameter search
score_list=[]
print("spearmint hyperparameter search mode")
else:
print("No valid mode chosen")
return 1
##############################################################################
# Load and prepare data set
#
# dataset for grid search
for comp_file_0,comp_file_1 in comp_file_list:
print("Operating of files :"+comp_file_0+" "+comp_file_1)
#extracts data from the files
features_0=np.loadtxt(comp_file_0,dtype='d')
features_1=np.loadtxt(comp_file_1,dtype='d')
#determine how many data points are in each sample
no_0=features_0.shape[0]
no_1=features_1.shape[0]
no_tot=no_0+no_1
#Give all samples in file 0 the label 0 and in file 1 the feature 1
label_0=np.zeros((no_0,1))
label_1=np.ones((no_1,1))
#Create an array containing samples and features.
data_0=np.c_[features_0,label_0]
data_1=np.c_[features_1,label_1]
data=np.r_[data_0,data_1]
np.random.shuffle(data)
X=data[:,:-1]
y=data[:,-1]
print("X : ",X)
print("y : ",y)
atest_size=0.2
if cv_n_iter==1:
train_range = range(int(math.floor(no_tot*(1-atest_size))))
test_range = range(int(math.ceil(no_tot*(1-atest_size))),no_tot)
#print("train_range : ", train_range)
#print("test_range : ", test_range)
acv = Counter(train_range,test_range)
#print(acv)
else:
acv = StratifiedShuffleSplit(y, n_iter=cv_n_iter, test_size=atest_size, random_state=42)
print("Finished with setting up samples")
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
if AD_mode != 2:
scaler = StandardScaler()
X = scaler.fit_transform(X)
if mode==1:
##############################################################################
# Grid Search
#
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
if AD_mode==1:
grid = GridSearchCV(clf, scoring=p_value_scoring_object.p_value_scoring_object_AD ,param_grid=param_grid, cv=acv)
else:
grid = GridSearchCV(clf, scoring=p_value_scoring_object.p_value_scoring_object ,param_grid=param_grid, cv=acv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
            # X_2d / y_2d were never defined in this adaptation; following the upstream
            # scikit-learn RBF-parameters example this file is based on, take the first
            # two columns of X for the 2D fits and plots below.
            X_2d = X[:, :2]
            y_2d = y
            classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
plt.savefig('prediction_comparison.png')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=-1.0, midpoint=-0.0001))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.savefig('Heat_map.png')
else:
if keras_mode==1:
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers import Dropout
from keras.utils import np_utils, generic_utils
dimof_input = X.shape[1]
dimof_output =2
y = np_utils.to_categorical(y, dimof_output)
print("dimof_input : ",dimof_input, "dimof_output : ", dimof_output)
#y = np_utils.to_categorical(y, dimof_output)
scores = []
counter = 1
for train_index, test_index in acv:
print("Cross validation run ", counter)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
print("X_train : ",X_train)
print("y_train : ",y_train)
batch_size = 1
dimof_middle = args[10]
dropout = 0.5
countof_epoch = 5
n_hidden_layers = args[11]
model = Sequential()
model.add(Dense(input_dim=dimof_input, output_dim=dimof_middle, init="glorot_uniform",activation='tanh'))
model.add(Dropout(dropout))
for n in range(n_hidden_layers):
model.add(Dense(input_dim=dimof_middle, output_dim=dimof_middle, init="glorot_uniform",activation='tanh'))
model.add(Dropout(dropout))
model.add(Dense(input_dim=dimof_middle, output_dim=dimof_output, init="glorot_uniform",activation='sigmoid'))
#Compiling (might take longer)
model.compile(loss='categorical_crossentropy', optimizer='sgd')
model.fit(X_train, y_train,show_accuracy=True,batch_size=batch_size, nb_epoch=countof_epoch, verbose=0)
prob_pred = model.predict_proba(X_test)
print("prob_pred : ", prob_pred)
assert (not (np.isnan(np.sum(prob_pred))))
# for y is 2D change dimof_output =2, add y = np_utils.to_categorical(y, dimof_output) and change the following line
prob_pred = np.array([sublist[0] for sublist in prob_pred])
y_test = np.array([sublist[0] for sublist in y_test])
print("y_test : ", y_test)
print("prob_pred : ", prob_pred)
#Just like in p_value_scoring_strategy.py
y_test = np.reshape(y_test,(1,y_test.shape[0]))
prob_pred = np.reshape(prob_pred,(1,prob_pred.shape[0]))
prob_0 = prob_pred[np.logical_or.reduce([y_test==0])]
prob_1 = prob_pred[np.logical_or.reduce([y_test==1])]
if __debug__:
print("Plot")
if AD_mode==1:
p_AD_stat=stats.anderson_ksamp([prob_0,prob_1])
print(p_AD_stat)
scores.append(p_AD_stat[2])
else:
p_KS=stats.ks_2samp(prob_0,prob_1)
print(p_KS)
scores.append(p_KS[1])
counter +=1
else:
if keras_mode==2:
X, y = Xy_to_keras_Xy(X,y)
if AD_mode==1:
scores = (-1)*cross_validation.cross_val_score(clf,X,y,cv=acv,scoring=p_value_scoring_object.p_value_scoring_object_AD)
elif AD_mode==2:
print("X[:,0].min() , ", X[:,0].min(), "X[:,0].max() : ", X[:,0].max())
scores = (-1)*cross_validation.cross_val_score(clf,X,y,cv=acv,scoring=p_value_scoring_object.p_value_scoring_object_visualisation)
import os
os.rename("visualisation.png",name+"_visualisation.png")
else:
scores = (-1)*cross_validation.cross_val_score(clf,X,y,cv=acv,scoring=p_value_scoring_object.p_value_scoring_object)
print("scores : ",scores)
score_list.append(np.mean(scores))
if mode==2:
return np.mean(scores)
############################################################################################################################################################
############################################################### Evaluation of results ####################################################################
############################################################################################################################################################
if mode==0:
# The score list has been computed. Let's plot the distribution
print(score_list)
with open(name+"_p_values",'w') as p_value_file:
for item in score_list:
p_value_file.write(str(item)+'\n')
histo_plot_pvalue(score_list,50,"p value","Frequency","p value distribution",name)
if __name__ == "__main__":
print("Executing classifier_eval_simplified as a stand-alone script")
print()
comp_file_list=[]
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
#clf = SVC(C=100,gamma=0.1,probability=True, cache_size=7000)
####################################################################
# Dalitz operaton
####################################################################
for i in range(100):
comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data.{0}.0.txt".format(i), os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data.2{0}.1.txt".format(str(i).zfill(2))))
clf = tree.DecisionTreeClassifier('gini','best',46, 100, 1, 0.0, None)
#clf = AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2), learning_rate=0.95,n_estimators=440)
#clf = SVC(C=params['aC'],gamma=params['agamma'],probability=True, cache_size=7000)
args=["dalitz_dt","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),1]
#For nn:
#args=["dalitz","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),1,params['dimof_middle'],params['n_hidden_layers']]
####################################################################
# Gaussian samples operation
####################################################################
#clf = tree.DecisionTreeClassifier('gini','best',37, 89, 1, 0.0, None)
#clf = AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2), learning_rate=0.01,n_estimators=983)
#clf = SVC(C=params['aC'],gamma=params['agamma'],probability=True, cache_size=7000)
#args=["gauss_svc","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13)]
#For nn:
#args=["dalitz","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),params['dimof_middle'],params['n_hidden_layers']]
####################################################################
classifier_eval(0,0,args)
| mit |
kernc/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 143 | 9461 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.model_selection import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
VandyAstroML/Vandy_AstroML | profiles/Nick/MLDigits1_1.py | 1 | 14577 | import sys
import os
import numpy as np
import math
import random
from scipy import stats
import matplotlib
matplotlib.use( 'Agg' )
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
#import sklearn
from datetime import datetime
startTime = datetime.now()
print("WELCOME TO A QUICK PYTHON PROGRAM.")
print("It will do machine learning stuff with Numbers.. yep. ")
"""
-----------------------------------------------------
NICHOLAS CHASON
Machine Learning Code 1. - Native Nays.
-----------------------------------------------------
"""
def plot_image_basic( xlist, ylist, title, xlab, ylab, legend_val, psize, xlog, ylog, yflip , pcounter, cmap_var=''):
print("Entered Basic Plot Function")
if legend_val != 0:
pass
plot_title=" Blank Title "
x_axis="Blank X"
y_axis="Blank Y"
pointsize = 5
#sets new plot features from call.
"""
if True:
plot_title = title
x_axis = xlab
y_axis = ylab
pointsize = psize
"""
#plt.title(plot_title)
#plt.xlabel(x_axis)
#plt.ylabel(y_axis)
#plt.yscale("log")
"""
if yflip == True:
try:
plt.ylim(max(ylist), min(ylist))
except:
print("uh.oh.... try except statement. check ylim.")
if ylog != 0:
plt.yscale("log", nonposy='clip')
if xlog != 0:
plt.xscale("log", nonposy='clip')
"""
plt.imshow(xlist, cmap=cmap_var)
#plt.xlim(min(xlist), max(xlist) )
    figure_name = os.path.expanduser('~/Feb17astroML_plot%s.png' % pcounter)  # use the pcounter argument, not the global pcount
plt.savefig(figure_name)
print("Saving plot: %s" % figure_name)
plt.clf()
dummy = pcounter + 1
return dummy
"""
===================
LOAD DIGITS
===================
"""
digits = load_digits()
"""
#----------------------------------------
#To print the Description of load_digits
#----------------------------------------
#To print the full description of data...
print digits['DESCR']
"""
"""
=====================
PRINT INITIAL INFO
=====================
"""
max_pixel_value = 16
test_reduce_fraction = 4. #1/(fraction) Reduces the test set. [set 2 for half.]
pcount = 0
#Set the Colormap
color_map_used = plt.get_cmap('autumn')
#Print the Data Keys
print 'Data Dict Keys: ', digits.keys()
#Loading in data
#Including the pixel values for each sample.
digits_data = digits['data']
total_number_of_images = len(digits_data)
#Get a single random image index for print and plot.
max_image_idx = total_number_of_images
rand_image_idx = int(random.random() * max_image_idx)
digits_targetnames = digits['target_names']
digits_target = digits['target']
print '\nSample Data Matrix: Element #', rand_image_idx
print '------------------------'
#Prints digits_data[rand_image_idx].
#The pixel row length is currently set to 8.
length_row = 8
for idx, value in enumerate(digits_data[rand_image_idx]):
print ("%d " % int(value)),
#print " ",
if ((idx+1)%length_row == 0):
print '\n',
print 'Possible Target names: ', digits_targetnames
print 'Truth Targets: ', digits_target
print 'Total Images: ', len(digits_data)
"""
===========================
BUILD TRAINING / TEST SET
===========================
"""
test_fraction = 0.1
training_fraction = 1. - test_fraction
#Get random indexes. Note: the test and training indexes are drawn independently from the full range, so the two sets can overlap (the test set is not strictly held out).
test_number_of_images = math.floor(total_number_of_images * test_fraction)
test_idxs = random.sample(range(0, total_number_of_images), int(test_number_of_images))
training_number_of_images = math.floor(total_number_of_images * training_fraction)
training_idxs = random.sample(range(0, total_number_of_images), int(training_number_of_images))
print 'Length test : ', len(test_idxs)
print 'Length training: ', len(training_idxs)
test_training_ratio = test_number_of_images/training_number_of_images
test_total_ratio = test_number_of_images/total_number_of_images
print 'The Test/Training ratio is: ', test_training_ratio
print 'The Test/Total ratio is: ', test_total_ratio
"""
BUILD A SORTED LIST OF LISTS OF training DATA!
"""
#2 - Dimensional. 0-->9 ; [0 -- > matching indexes]
Training_Index_Master_Array = []
#Loop over all possilbe Number Values. {0 --> 9}
for num in range(0, 10):
num_idxs = []
for i, idx in enumerate(training_idxs):
if digits_target[idx] == num:
num_idxs.append(idx)
num_idxs.sort()
Training_Index_Master_Array.append(num_idxs)
"""
BUILD A SORTED LIST OF LISTS OF test DATA!
NOTE: CAUTION! DO NOT ACCESS THESE UNTIL THE END!!!!!!!
"""
#2 - Dimensional. 0-->9 ; [0 -- > matching indexes]
Test_Index_Master_Array = []
#Loop over all possilbe Number Values. {0 --> 9}
for num in range(0, 10):
num_idxs = []
for i, idx in enumerate(test_idxs):
if digits_target[idx] == num:
num_idxs.append(idx)
num_idxs.sort()
Test_Index_Master_Array.append(num_idxs)
#To Access the indexes of the training set matching Truth = index_TIMA
#index_TIMA = 2
#print Training_Index_Master_Array[index_TIMA]
#Do something
"""
==========================================================
Begin Machine Magic
==========================================================
"""
print("Building the Average Set of Numbers from Training Set...")
"""
-------------------------------
Build Average Number
-------------------------------
"""
#Declare Variables for loops
#Initialize Array for storing a single Average pixel array for a number.
#Training_Pixels_Master_Array = [[0 for i in range(10)] for y in range(64)]
#pix_vals = [[1 for x in range(length_row)] for y in range(length_row)]
pix_vals = [1 for x in range(length_row*length_row)]
#print pix_vals
temp_sum = 0
idx_counter = 0
pix_val = 0
# For each number, for each training example matching that number,
# for each pixel, FIND THE AVERAGE VALUE.
#Sums over each number
for num in range(0, 10):
#Sums over each pixel.
for pix_idx in range(0, (length_row*length_row)):
#tracks the sum of the pixel value for each matching image
#Sums over each matching image
for i, index in enumerate(Training_Index_Master_Array[num]):
pix_val = digits_data[index][pix_idx]
temp_sum += pix_val
idx_counter += 1
#Store Average Value and Clear temporary values.
avg_pix_val = temp_sum / idx_counter
idx_counter = 0
temp_sum = 0
pix_vals[pix_idx] = avg_pix_val
    #Was done as a debugging measure. too lazy to remove.
if num == 0:
Training_Pixel_0 = pix_vals[:]
if num == 1:
Training_Pixel_1 = pix_vals[:]
if num == 2:
Training_Pixel_2 = pix_vals[:]
if num == 3:
Training_Pixel_3 = pix_vals[:]
if num == 4:
Training_Pixel_4 = pix_vals[:]
if num == 5:
Training_Pixel_5 = pix_vals[:]
if num == 6:
Training_Pixel_6 = pix_vals[:]
if num == 7:
Training_Pixel_7 = pix_vals[:]
if num == 8:
Training_Pixel_8 = pix_vals[:]
if num == 9:
Training_Pixel_9 = pix_vals[:]
if num == 10:
print("ERROR NUMBER SHOULDNT EQUAL 10...")
#SETS THE TRAINING ARRAY.
Training_Pixels_Master_Array= [ Training_Pixel_0, \
Training_Pixel_1, Training_Pixel_2, Training_Pixel_3, \
Training_Pixel_4, Training_Pixel_5, Training_Pixel_6, \
Training_Pixel_7, Training_Pixel_8, Training_Pixel_9, ]
"""
#========================================
#TO PRING OUT TRAINING Matrix
#========================================
"""
rand_number_value = int(digits_target[rand_image_idx])
print("Training_Pixels_Master_Array for Random Value: %d" % int(rand_number_value))
print("==================================================")
for idx, value in enumerate(Training_Pixels_Master_Array[rand_number_value]):
print ("%.2f " % value),
#print " ",
if ((idx+1)%length_row == 0):
print '\n',
"""
-------------------------------
END: Build Average Number
-------------------------------
"""
print("Finished Building Average Numbers from Training Data!... Choosing Test. ")
"""
=============================================
*********************************************
TESTING PORTION OF THE CODE. ENTER HERE.
*********************************************
=============================================
"""
predicted_value = 0 #What does the code predict
success_truths = 0 #How many correctly predicted
secondary_predicted_value = 0 #What is the codes second choice
secondary_success_truths = 0 #How many correctly predicted out of top two guesses
false_predictions = 0 #How many primary predictions are false
false_secondary_predictions = 0 #How many primary or secondary predictions are false
#Initialize relevant things
iterations = 0 #Total number of items looped over.
predictions = []
truths = []
successes = []
#LOOP FOR GOING OVER TEST SET!
print("MAX ITERATIONS SET TO 1/%d of test set." % test_reduce_fraction)
max_test_iterations = len(test_idxs)/test_reduce_fraction
#print("Test_Index_Master_Array[1]: ")
#print Test_Index_Master_Array[1]
for poop_index, poop_value in enumerate(test_idxs):
pass
if iterations >= max_test_iterations:
print("Breaking due to max_test_iterations being reached! ... Break")
break
"""
=====================================
TESTING SINGLE RANDOM DRAW from test
=====================================
"""
#Uncomment to choose a single Choice to evaluate.
random_test_idx = poop_value
#random_test_idx = random.choice(training_idxs)
print("\nSelected index: %s, as random choice." % random_test_idx)
#Compute the cost of random image from averages store in costs
#INITIALIZE COSTS.
costs_SI = []
costs = 0
for i_counter in range(10):
#print Training_Pixels_Master_Array[i_counter]
for pix_idx in range(length_row*length_row):
cost_val = abs(float(Training_Pixels_Master_Array[i_counter][pix_idx]) - \
float(digits_data[random_test_idx][pix_idx]))
costs += cost_val
costs_SI.append(costs)
costs = 0
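    # Hedged aside (equivalent, not used by the script): this template matching is a
    # nearest-centroid classifier with an L1 metric; a vectorized NumPy form would be
    #
    #     templates = np.array(Training_Pixels_Master_Array)            # shape (10, 64)
    #     costs_SI = np.abs(templates - digits_data[random_test_idx]).sum(axis=1)
    #     predicted_value = int(np.argmin(costs_SI))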
#PRINT THE COSTS TO THE SCREEN.
print("======================================================")
print(" COSTS CALCULATED! ..." )
print("======================================================")
print("Predicted # | Pixel Cost_per_image ")
#PRint out the Costs.
for i in range(10):
stringv = float(costs_SI[i])
print " {0} .......... {1:.1f}".format(i, stringv)
"""
------------------------
FIND MIMIMUM COST
------------------------
"""
predicted_value = costs_SI.index(min(costs_SI))
#Find Secondary Value.
current_cost = 9999999
for i, cost in enumerate(costs_SI):
if i == predicted_value or cost >= current_cost:
pass
else:
current_cost = cost
secondary_predicted_value = i
truth = digits_target[random_test_idx]
print("WE FIRST PREDICT A VALUE OF: %d" % predicted_value)
print("It may also be the value : %d" % secondary_predicted_value)
print("THE ACTUAL VALUE WAS : %d" % truth)
#LISTS to store predictions and truths for analysis.
predictions.append(predicted_value)
truths.append(truth)
if predicted_value == truth:
successes.append(1) #Successes == 1 means TRUTH
success_truths += 1
else:
successes.append(0) #Successes == 0 means FAILURE
#PRINT THE FAILURES.
print("FAILURE EXAMPLE PRINTING....")
title_label = "A single number. "
x_label = "x coordinate"
y_label = "y coordinate"
x_data = digits['images'][poop_value]
y_data = []
legend_val = 0
pointsize = 3
yflip = False
ylog = 0
xlog = 0
pcount = plot_image_basic(x_data, y_data, title_label, x_label, y_label, \
legend_val, pointsize, xlog, ylog, yflip, pcount, color_map_used)
false_predictions += 1
if (secondary_predicted_value == truth) or (predicted_value == truth):
secondary_success_truths += 1
else:
false_secondary_predictions += 1
iterations += 1
print("\n")
print("*************************************************************")
print(" **************** FINISHED TESTING **************** ")
print("*************************************************************\n")
print("================================")
print("SUCCESS/ failure TABLE: ")
print("================================")
print("Correct Primary Predictions: %d " % success_truths)
print("Correct Pri|Sec Predictions: %d " % secondary_success_truths)
print("Total Predictions Made: %d " % iterations)
print("Ratio of Correct (Primary) to Total: %.3f" % (1.0*success_truths/iterations))
print("Ratio of Correct (Pri|Sec) to Total: %.3f" % (1.0*secondary_success_truths/iterations))
print("================================")
suc_count = 0
fail_count = 0
tot_count = 0
fail_num_by_num = []
success_ratio_by_num = []
fail_ratio_by_num = []
for num in range(10):
for i, elem in enumerate(truths):
if elem == num:
if successes[i] == 1:
                #SUCCESS (successes[i] == 1 marks a correct primary prediction)
suc_count += 1
else:
fail_count += 1
tot_count += 1
try:
suc_ratio = 1.0*suc_count/tot_count
fail_ratio = 1.0*fail_count/tot_count
except:
print("Encountered a value with no data points! num = %d " % num)
suc_ratio = 0
fail_ratio = 0
success_ratio_by_num.append(suc_ratio)
fail_num_by_num.append(fail_count)
fail_ratio_by_num.append(fail_ratio)
suc_count = 0
fail_count = 0
tot_count = 0
print("Success Ratio By Number\n ------------------------------")
print("Truth_val | Success | Failure | #Fail Pri. ")
for imtired, reallytired in enumerate(success_ratio_by_num):
print(" %d: %.3f %.3f %d" % \
(imtired, reallytired, fail_ratio_by_num[imtired], \
fail_num_by_num[imtired]))
"""
====================
PRINT SAMPLE IMAGE
====================
"""
#initial plots generated to 0.
#pcount = 0
#Set the Colormap
color_map_used = plt.get_cmap('autumn')
#Plot 1. A single number.
#------------------------------------------------------------------
#Generate a random number for the image from 0 ==> max_image_idx
#max_image_idx = total_number_of_images
#rand_image_idx = int(random.random() * max_image_idx)
"""
title_label = "A single number. "
x_label = "x coordinate"
y_label = "y coordinate"
x_data = digits['images'][rand_image_idx]
y_data = []
legend_val = 0
pointsize = 3
yflip = False
ylog = 0
xlog = 0
pcount = plot_image_basic(x_data, y_data, title_label, x_label, y_label, \
legend_val, pointsize, xlog, ylog, yflip, pcount, color_map_used)
"""
"""
print("Now Plotting....")
#
===================
PLOTTING GUESSES
===================
#
pcount = 0
title_label = ""
x_label = ""
y_label = ""
x_data = np.linspace(0, len(errorsLIST), num=len(errorsLIST))
y_data = errorsLIST
legend_val = 0
pointsize = 3
yflip = False
ylog = 0
xlog = 0
pcount = plot_basic(x_data, y_data, title_label, x_label, y_label, \
legend_val, pointsize, xlog, ylog, yflip, pcount)
"""
print("Time: ")
print (datetime.now() - startTime)
print("END. ")
#Doop. | mit |
rchurch4/georgetown-data-science-fall-2015 | data_preparation/data_preparation_lib/clean_and_feature_generation_yelp.py | 1 | 10765 | # Ravi Makhija
# clean_and_feature_generation_yelp.py
# Version 4
# Python 2
#
# Description:
# Function to generate extra features. This is used in
# data_preparation_yelp.py.
#
# List of features generated:
# restaurant_latitude
# restaurant_longitude
# user_latitude,
# user_longitude
# user_restaurant_distance
# user_is_local
# user_review_length
#   user_rating_mean_for_restaurant_yelp
#   user_rating_mean_for_restaurant_yelp_local
#   user_rating_mean_for_restaurant_yelp_non_local
#
# References:
# pandas join for aggregate:
# http://stackoverflow.com/questions/12200693/python-pandas-how-to-assign-groupby-operation-results-back-to-columns-in-parent
# pandas tutorial
# http://pandas.pydata.org/pandas-docs/stable/10min.html#min
import pandas as pd
import numpy as np
import os
from geopy.geocoders import Nominatim
from geopy.distance import vincenty
import time
import sys
def clean_and_feature_generation_yelp(input_file_paths):
# input:
# input_file_paths
# takes the input file paths as a list of strings.
# these paths point to csv files created from
# the original scraped json files.
# output:
# One csv per input csv file is created.
# return
# None
print 'Starting feature additions and basic cleaning.'
################
# Begin adding features and basic cleaning.
################
for i, current_input_file_path in enumerate(input_file_paths):
# Dynamically make output file paths
current_output_file_path = current_input_file_path.replace('.csv', '_cleaned_features.csv')
print "Current file: ", current_input_file_path
################
# Data loading
################
# Load data in as a Pandas DataFrame.
d = pd.read_csv(current_input_file_path)
num_rows = len(d) # this should only be changed during testing.
################
# Data cleaning
################
def data_cleaning():
# Clean up address formatting
for i in range(len(d)):
d.loc[i, 'restaurant_address'] = d.loc[i, 'restaurant_address'].replace('Washington, DC', ', Washington, DC')
# Extract and coerce user_num_reviews to a float
for i in range(len(d)):
number_extract = d.loc[i, 'user_num_reviews'][0:d.loc[i, 'user_num_reviews'].find('review')]
d.loc[i, 'user_num_reviews'] = float(number_extract.strip())
print 'data_cleaning() complete'
################
# Feature generating main methods
#
# Note: Some of the feature generation scrip takes time
# to run (e.g. grabbing geocodes), that is why this script
# has been made modular by allowing one to specify which
# main methods (e.g. which features) to run in the end.
################
def make_restaurant_geocode():
# Generates:
# d.restaurant_latitude
# d.restaurant_longitude
# Assign geocode based on restaurant_location.
# Since the number of cities we are investigating is
# relatively small (definitely under 10 in the foreseeable
# future), hardcoding in the latitude and longitude seems
# like the quickest approach.
for i in range(num_rows):
# Restaurant geocode
if d.loc[i, 'restaurant_location'] == "Washington, DC":
d.loc[i, 'restaurant_latitude'] = 38.8949549
d.loc[i, 'restaurant_longitude'] = -77.0366456
elif d.loc[i, 'restaurant_location'] == "Nashville, TN":
d.loc[i, 'restaurant_latitude'] = 36.1622296
d.loc[i, 'restaurant_longitude'] = -86.774353
print 'make_restaurant_geocode() complete'
def make_user_geocode():
# Generates:
# d.user_latitude,
# d.user_longitude
# Load in lookup table for geocodes
geocode_lookup_table_df = pd.read_csv('data/geocode_lookup_table.csv')
# Make dictionary out of lookup table
a = geocode_lookup_table_df.user_location
b = geocode_lookup_table_df.user_latitude
c = geocode_lookup_table_df.user_longitude
dict_keys = list(a)
dict_values = zip(list(b), list(c))
geocode_lookup_table = dict(zip(dict_keys, dict_values))
# Get lat/long for cities and store in dataframe
for i in range(num_rows):
d.loc[i, 'user_latitude'] = geocode_lookup_table[d.loc[i, 'user_location']][0]
d.loc[i, 'user_longitude'] = geocode_lookup_table[d.loc[i, 'user_location']][1]
# replace missing values with NaN
#d = d.applymap(lambda x: np.nan if isinstance(x, basestring) and x.isspace() else x)
print 'make_user_geocode() complete'
def make_user_restaurant_distance():
# Generates:
# d.user_restaurant_distance
# Description:
# Uses the Vincenty distance formula to calculate distance between
# two points on a sphere, using the latitude and longitude.
for i in range(num_rows):
restaurant_geocode = (d.loc[i, 'restaurant_latitude'], d.loc[i, 'restaurant_longitude'])
user_geocode = (d.loc[i, 'user_latitude'], d.loc[i, 'user_longitude'])
try:
d.loc[i, 'user_restaurant_distance'] = vincenty(restaurant_geocode, user_geocode).miles
except: # crashing when lat/long is missing
continue
print 'make_user_restaurant_distance() complete'
def make_user_is_local():
# Generates:
# d.is_local
# Description:
# User is considered local if he or she is within the distance_threshold.
distance_threshold = 50 # in miles
for i in range(num_rows):
if d.loc[i, 'user_restaurant_distance'] <= distance_threshold:
d.loc[i, 'user_is_local'] = True
else:
d.loc[i, 'user_is_local'] = False
print 'make_user_is_local() is complete'
def make_user_review_length():
# Generates:
# d.user_review_length
for i in range(num_rows): # need to set this to len(d) later
d.loc[i, 'user_review_length'] = len(d.loc[i, 'user_review'])
print 'make_user_review_length() is complete'
def make_user_rating_mean_for_restaurant_yelp(d):
# Generates:
# d.user_rating_mean_for_restaurant_yelp
# Description:
# Mean of all yelp user ratings in data set by restaurant.
# Input:
# The same dataframe all functions here are working with.
# This helper rebinds d via "d = d.join(...)", which only creates a
# new local binding, so the joined frame is returned and must be
# reassigned by the caller. Helpers that never rebind d can simply
# read the enclosing variable.
mean_ratings = d.groupby('restaurant_name')['user_rating'].mean()
d = d.join(mean_ratings, on='restaurant_name', rsuffix='_mean_for_restaurant_yelp')
print 'make_user_rating_mean_for_restaurant_yelp() is complete'
return d
def make_user_rating_mean_for_restaurant_yelp_local(d):
# Generates:
# d.user_rating_mean_for_restaurant_yelp_local
# Input:
# The same dataframe all functions here are working with. As above,
# "d = d.join(...)" rebinds only the local name, so the joined frame
# is returned and reassigned by the caller.
# replace missing values with NaN (don't need to do this currently)
#d = d.applymap(lambda x: np.nan if isinstance(x, basestring) and x.isspace() else x)
mean_ratings = d.groupby(['user_is_local', 'restaurant_name'])['user_rating'].mean()
mean_ratings_local = mean_ratings[1] # there's surely a better way to do this
#d = d.join(mean_ratings, on=['user_is_local', 'restaurant_name'], rsuffix='_mean_for_restaurant_yelp_local')
d = d.join(mean_ratings_local, on=['restaurant_name'], rsuffix='_mean_for_restaurant_yelp_local')
print 'make_user_rating_mean_for_restaurant_yelp_local() is complete'
return d
def make_user_rating_mean_for_restaurant_yelp_non_local(d):
# Generates:
# d.user_rating_mean_for_restaurant_yelp_non_local
# Input:
# The same dataframe all functions here are working with. As above,
# "d = d.join(...)" rebinds only the local name, so the joined frame
# is returned and reassigned by the caller.
mean_ratings = d.groupby(['user_is_local', 'restaurant_name'])['user_rating'].mean()
mean_ratings_non_local = mean_ratings[0]
d = d.join(mean_ratings_non_local, on=['restaurant_name'], rsuffix='_mean_for_restaurant_yelp_non_local')
print 'make_user_rating_mean_for_restaurant_yelp_non_local() is complete'
return d
################
# Run 'closure' main methods
################
global_main_switch = True # set to true to quickly run all main methods
if False or global_main_switch: # data cleaning
data_cleaning()
if True or global_main_switch: # geocode-based features
make_restaurant_geocode()
make_user_geocode()
make_user_restaurant_distance()
make_user_is_local()
if False or global_main_switch: # review text-based features
make_user_review_length()
if False or global_main_switch: # aggregate mean yelp ratings
d = make_user_rating_mean_for_restaurant_yelp(d)
d = make_user_rating_mean_for_restaurant_yelp_local(d)
d = make_user_rating_mean_for_restaurant_yelp_non_local(d)
################
# Save results
################
d.to_csv(current_output_file_path, index=False)
print 'finished creating', current_output_file_path # success message
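# --- Editor's illustrative sketch (not part of the original script) ---
# Minimal driver showing how the function above is intended to be called;
# the input paths below are hypothetical placeholders for the scraped-csv
# files.
if __name__ == '__main__':
    example_inputs = ['data/yelp_washington_dc.csv',
                      'data/yelp_nashville_tn.csv']
    clean_and_feature_generation_yelp(example_inputs)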
| mit |
cl4rke/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
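###############################################################################
# Editor's illustrative addition (not part of the original example): the
# test-set deviance curve computed above can also be used to pick a stopping
# iteration instead of always using all 500 trees.
best_iter = int(np.argmin(test_score)) + 1
print("Lowest test-set deviance reached after %d boosting iterations" % best_iter)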
| bsd-3-clause |
thilbern/scikit-learn | sklearn/utils/testing.py | 4 | 21772 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Py 2.6
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
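# Editor's illustration (not part of the original module): assert_warns returns
# the wrapped callable's result, so the warning check and the return-value
# check can be combined, e.g.
#
#     def noisy_sum(a, b):
#         warnings.warn("noisy_sum is deprecated", DeprecationWarning)
#         return a + b
#
#     assert_equal(assert_warns(DeprecationWarning, noisy_sum, 1, 2), 3)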
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Calable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not w[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)"
% (func.__name__, warning_class, w[0]))
# substring will match, the entire message with typo won't
msg = w[0].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if not check_in_message(msg):
raise AssertionError("The message received ('%s') for <%s> is "
"not the one you expected ('%s')"
% (msg, func.__name__, message
))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
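# Editor's illustration (not part of the original module): assert_raise_message
# checks both the exception type and a substring of its message, e.g.
#
#     assert_raise_message(AssertionError, "is not lower than",
#                          _assert_less, 2, 1)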
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
Note: this function transposes all arrays, while fetch_mldata only
transposes 'data'; take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer',
'TfidfTransformer', 'IsotonicRegression', 'OneHotEncoder',
'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier',
'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures']
def all_estimators(include_meta_estimators=False, include_other=False,
type_filter=None, include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
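# Editor's illustration (not part of the original module): a typical meta-test
# iterates over the (name, class) tuples returned by all_estimators, e.g.
#
#     for name, Estimator in all_estimators(type_filter='classifier'):
#         clf = Estimator()
#         set_random_state(clf)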
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
rodorad/spark-tk | regression-tests/sparktkregtests/testcases/scoretests/linear_regression_test.py | 2 | 2224 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests Linear Regression scoring engine """
import unittest
import os
from sparktkregtests.lib import sparktk_test
from sparktkregtests.lib import scoring_utils
class LinearRegression(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(LinearRegression, self).setUp()
dataset = self.get_file("linear_regression_gen.csv")
schema = [("c1", float),
("c2", float),
("c3", float),
("c4", float),
("label", float)]
self.frame = self.context.frame.import_csv(
dataset, schema=schema)
def test_model_scoring(self):
"""Test publishing a linear regression model"""
model = self.context.models.regression.linear_regression.train(self.frame, "label", ['c1', 'c2', 'c3', 'c4'])
predict = model.predict(self.frame, ['c1', 'c2', 'c3', 'c4'])
test_rows = predict.to_pandas(predict.count())
file_name = self.get_name("linear_regression")
model_path = model.export_to_mar(self.get_export_file(file_name))
with scoring_utils.scorer(
model_path, self.id()) as scorer:
for _, i in test_rows.iterrows():
res = scorer.score(
[dict(zip(["c1", "c2", "c3", "c4"], list(i[0:4])))])
self.assertEqual(
i["predicted_value"], res.json()["data"][0]['Prediction'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
devanjedi/opencricket | src/ci/scorecardParser.py | 1 | 1202 | #!/usr/bin/python
from bs4 import BeautifulSoup
import re
import urllib3
import time
import pandas
#httpool = urllib3.PoolManager()
#response = httpool.request('GET','http://stats.espncricinfo.com/ci/engine/stats/index.html?class=1;opposition=8;team=2;template=results;type=bowling;view=innings')
#html = response.data
soup = BeautifulSoup(open("../../testdata/score.html"))
#print (soup.prettify())
t = soup.title.contents[0]
#print t.contents[0]
reg="([^:]*): (.*?) v (.*?) at ([^,]*), ([^|]*)| "
lt = re.compile(reg)
mo = lt.match(t)
test= mo.group(1)
team1 = mo.group(2)
team2 = mo.group(3)
location = mo.group(4)
date = mo.group(5)
print team1
print date
#for player in soup.find_all('a', class_="playerName"):
# print player.string
# print player.get('href')
#for i in range(1,11):
# soup = BeautifulSoup(html)
# time.sleep(1)
# link= soup.find('a', class_="PaginationLink").get('href')
# print link
# response = httpool.request('GET','http://stats.espncricinfo.com'+link)
# html = response.data
soup = BeautifulSoup(open('../../testdata/guru.html'))
tdata=soup.find(class_='guruNav').contents[0]
print tdata
#dfs = pandas.io.html.read_html(tdata)
#print dfs
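# Editor's illustrative sketch (not part of the original script): the title
# regex above expects Cricinfo scorecard titles of the form
# "<match>: <team1> v <team2> at <ground>, <date> | ...". The sample title
# below is hypothetical; group(1)-group(5) map to match type, the two teams,
# ground and date, matching the assignments above.
def _parse_title_example():
    sample_title = "1st Test: Australia v England at Melbourne, Dec 26-30, 2013 | Cricket scorecard"
    m = lt.match(sample_title)
    return m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)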
| gpl-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexing/multiindex/conftest.py | 2 | 1111 | import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex
from pandas.util import testing as tm
@pytest.fixture
def multiindex_dataframe_random_data():
"""DataFrame with 2 level MultiIndex with random data"""
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
return DataFrame(
np.random.randn(10, 3), index=index, columns=Index(["A", "B", "C"], name="exp")
)
@pytest.fixture
def multiindex_year_month_day_dataframe_random_data():
"""DataFrame with 3 level MultiIndex (year, month, day) covering
first 100 business days from 2000-01-01 with random data"""
tdf = tm.makeTimeDataFrame(100)
ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
# use Int64Index, to make sure things work
ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels], inplace=True)
ymd.index.set_names(["year", "month", "day"], inplace=True)
return ymd
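# Editor's note (illustrative, not part of the original conftest): tests in
# this package consume these fixtures simply by naming them as arguments, e.g.
#
#     def test_index_names(multiindex_dataframe_random_data):
#         frame = multiindex_dataframe_random_data
#         assert list(frame.index.names) == ["first", "second"]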
| apache-2.0 |
nmartensen/pandas | pandas/core/indexes/numeric.py | 4 | 13436 | import numpy as np
from pandas._libs import (index as libindex,
algos as libalgos, join as libjoin)
from pandas.core.dtypes.common import (
is_dtype_equal,
pandas_dtype,
is_float_dtype,
is_object_dtype,
is_integer_dtype,
is_bool,
is_bool_dtype,
is_scalar)
from pandas.core.common import _asarray_tuplesafe, _values_from_object
from pandas import compat
from pandas.core import algorithms
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs)
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.indexes.base as ibase
_num_index_shared_docs = dict()
class NumericIndex(Index):
"""
Provide numeric type operations
This is an abstract class
"""
_is_numeric_dtype = True
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False):
if fastpath:
return cls._simple_new(data, name=name)
# isscalar, generators handled in coerce_to_ndarray
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if name is None and hasattr(data, 'name'):
name = data.name
return cls._simple_new(subarr, name=name)
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
if is_bool(value) or is_bool_dtype(value):
# force conversion to object
# so we don't lose the bools
raise TypeError
return value
def _convert_tolerance(self, tolerance):
try:
return float(tolerance)
except ValueError:
raise ValueError('tolerance argument for %s must be numeric: %r' %
(type(self).__name__, tolerance))
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Subclasses need to override this only if the process of casting data
from some accepted dtype to the internal dtype(s) bears the risk of
truncation (e.g. float to int).
"""
pass
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
_num_index_shared_docs['class_descr'] = """
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
An Index instance can **only** contain hashable objects.
"""
_int64_descr_args = dict(
klass='Int64Index',
ltype='integer',
dtype='int64',
extra="""This is the default index type used
by the DataFrame and Series ctors when no explicit
index is provided by the user.
"""
)
class Int64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args
_typ = 'int64index'
_arrmap = libalgos.arrmap_int64
_left_indexer_unique = libjoin.left_join_indexer_unique_int64
_left_indexer = libjoin.left_join_indexer_int64
_inner_indexer = libjoin.inner_join_indexer_int64
_outer_indexer = libjoin.outer_join_indexer_int64
_can_hold_na = False
_engine_type = libindex.Int64Engine
_default_dtype = np.int64
@property
def inferred_type(self):
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(Int64Index, self)
._convert_scalar_indexer(key, kind=kind))
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Int64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as ints.
"""
if not issubclass(data.dtype.type, np.signedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
_uint64_descr_args = dict(
klass='UInt64Index',
ltype='unsigned integer',
dtype='uint64',
extra=''
)
class UInt64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args
_typ = 'uint64index'
_arrmap = libalgos.arrmap_uint64
_left_indexer_unique = libjoin.left_join_indexer_unique_uint64
_left_indexer = libjoin.left_join_indexer_uint64
_inner_indexer = libjoin.inner_join_indexer_uint64
_outer_indexer = libjoin.outer_join_indexer_uint64
_can_hold_na = False
_na_value = 0
_engine_type = libindex.UInt64Engine
_default_dtype = np.uint64
@property
def inferred_type(self):
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('u8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(UInt64Index, self)
._convert_scalar_indexer(key, kind=kind))
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = _asarray_tuplesafe(keyarr)
if is_integer_dtype(keyarr):
return _asarray_tuplesafe(keyarr, dtype=np.uint64)
return keyarr
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
if keyarr.is_integer():
return keyarr.astype(np.uint64)
return keyarr
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return UInt64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as uints.
"""
if not issubclass(data.dtype.type, np.unsignedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
UInt64Index._add_numeric_methods()
UInt64Index._add_logical_methods()
_float64_descr_args = dict(
klass='Float64Index',
dtype='float64',
ltype='float',
extra=''
)
class Float64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args
_typ = 'float64index'
_engine_type = libindex.Float64Engine
_arrmap = libalgos.arrmap_float64
_left_indexer_unique = libjoin.left_join_indexer_unique_float64
_left_indexer = libjoin.left_join_indexer_float64
_inner_indexer = libjoin.inner_join_indexer_float64
_outer_indexer = libjoin.outer_join_indexer_float64
_default_dtype = np.float64
@property
def inferred_type(self):
return 'floating'
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_float_dtype(dtype):
values = self._values.astype(dtype, copy=copy)
elif is_integer_dtype(dtype):
if self.hasnans:
raise ValueError('cannot convert float NaN to integer')
values = self._values.astype(dtype, copy=copy)
elif is_object_dtype(dtype):
values = self._values.astype('object', copy=copy)
else:
raise TypeError('Setting %s dtype to anything other than '
'float64 or object is not supported' %
self.__class__)
return Index(values, name=self.name, dtype=dtype)
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
return key
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
if kind == 'iloc':
return super(Float64Index, self)._convert_slice_indexer(key,
kind=kind)
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
def _format_native_types(self, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(self.values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not is_scalar(key):
raise InvalidIndexError
k = _values_from_object(key)
loc = self.get_loc(k)
new_values = _values_from_object(series)[loc]
return new_values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if not isinstance(other, Index):
return False
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if (not is_dtype_equal(self.dtype, other.dtype) or
self.shape != other.shape):
return False
left, right = self._values, other._values
return ((left == right) | (self._isnan & other._isnan)).all()
except (TypeError, ValueError):
return False
def __contains__(self, other):
if super(Float64Index, self).__contains__(other):
return True
try:
# if other is a sequence this throws a ValueError
return np.isnan(other) and self.hasnans
except ValueError:
try:
return len(other) <= 1 and ibase._try_get_item(other) in self
except TypeError:
return False
except:
return False
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
try:
if np.all(np.isnan(key)):
nan_idxs = self._nan_idxs
try:
return nan_idxs.item()
except (ValueError, IndexError):
# should only need to catch ValueError here but on numpy
# 1.7 .item() can raise IndexError when NaNs are present
if not len(nan_idxs):
raise KeyError(key)
return nan_idxs
except (TypeError, NotImplementedError):
pass
return super(Float64Index, self).get_loc(key, method=method,
tolerance=tolerance)
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is not None:
self._validate_index_level(level)
return algorithms.isin(np.array(self), values)
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
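# Editor's illustration (not part of pandas): the __contains__/get_loc
# overrides above make NaN behave like an ordinary label on a Float64Index,
# e.g.
#
#     >>> idx = Float64Index([1.5, 2.5, np.nan])
#     >>> np.nan in idx
#     True
#     >>> idx.get_loc(np.nan)
#     2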
| bsd-3-clause |
depet/scikit-learn | sklearn/decomposition/truncated_svd.py | 1 | 6027 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD.
import numpy as np
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import (array2d, as_float_array, atleast2d_or_csr,
check_random_state)
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iterations : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
`components_` : array, shape (n_components, n_features)
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized",
n_iterations=5, random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iterations = n_iterations
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
U, Sigma, VT = self._fit(X)
Sigma = np.diag(Sigma)
# or (X * VT.T).T, whichever takes fewer operations...
return np.dot(U, Sigma.T)
def _fit(self, X):
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iterations,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
return U, Sigma, VT
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = atleast2d_or_csr(X)
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = array2d(X)
return np.dot(X, self.components_)
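# Editor's illustration (not part of the module): the typical LSA workflow this
# class targets, assuming a tf-idf matrix built with
# sklearn.feature_extraction.text and a hypothetical `corpus` of documents:
#
#     >>> from sklearn.feature_extraction.text import TfidfVectorizer
#     >>> X = TfidfVectorizer().fit_transform(corpus)  # sparse (n_samples, n_features)
#     >>> lsa = TruncatedSVD(n_components=100, random_state=42)
#     >>> X_lsa = lsa.fit_transform(X)                 # dense (n_samples, 100)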
| bsd-3-clause |
sumspr/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
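# Editor's illustrative addition (not part of the original example): each tree
# predicts both targets jointly, so the prediction array has one column per
# output.
print("Prediction shape for the max_depth=5 tree: %s" % (y_2.shape,))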
| bsd-3-clause |
alberto-antonietti/nest-simulator | pynest/examples/sensitivity_to_perturbation.py | 3 | 8848 | # -*- coding: utf-8 -*-
#
# sensitivity_to_perturbation.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Sensitivity to perturbation
---------------------------
This script simulates a network in two successive trials, which are identical
except for one extra input spike in the second realisation (a small
perturbation). The network consists of recurrent, randomly connected excitatory
and inhibitory neurons. Its activity is driven by an external Poisson input
provided to all neurons independently. In order to ensure that the network is
reset appropriately between the trials, we do the following steps:
- resetting the network
- resetting the random network generator
- resetting the internal clock
- deleting all entries in the spike detector
- introducing a hyperpolarisation phase between the trials
(in order to prevent spikes remaining in the NEST memory
after the first simulation from being fed into the second simulation)
"""
###############################################################################
# Importing all necessary modules for simulation, analysis and plotting.
import numpy
import matplotlib.pyplot as plt
import nest
###############################################################################
# Here we define all parameters necessary for building and simulating the
# network.
# We start with the global network parameters.
NE = 1000 # number of excitatory neurons
NI = 250 # number of inhibitory neurons
N = NE + NI # total number of neurons
KE = 100 # excitatory in-degree
KI = 25 # inhibitory in-degree
###############################################################################
# Parameters specific for the neurons in the network. The default values of
# the reset potential ``E_L`` and the spiking threshold ``V_th`` are used to set
# the limits of the initial potential of the neurons.
neuron_model = 'iaf_psc_delta'
neuron_params = nest.GetDefaults(neuron_model)
Vmin = neuron_params['E_L'] # minimum of initial potential distribution (mV)
Vmax = neuron_params['V_th'] # maximum of initial potential distribution (mV)
###############################################################################
# Synapse parameters. Changing the weights `J` in the network can lead to
# qualitatively different behaviors. If `J` is small (e.g. ``J = 0.1``), we
# are likely to observe a non-chaotic network behavior (after perturbation
# the network returns to its original activity). Increasing `J`
# (e.g ``J = 5.5``) leads to rather chaotic activity. Given that in this
# example the transition to chaos is probabilistic, we sometimes observe
# chaotic behavior for small weights (e.g. ``J = 0.5``) and non-chaotic
# behavior for strong weights (e.g. ``J = 5.4``).
J = 0.5 # excitatory synaptic weight (mV)
g = 6. # relative inhibitory weight
delay = 0.1 # spike transmission delay (ms)
# External input parameters.
Jext = 0.2 # PSP amplitude for external Poisson input (mV)
rate_ext = 6500. # rate of the external Poisson input
# Perturbation parameters.
t_stim = 400. # perturbation time (time of the extra spike)
Jstim = Jext # perturbation amplitude (mV)
# Simulation parameters.
T = 1000. # simulation time per trial (ms)
fade_out = 2. * delay # fade out time (ms)
dt = 0.01 # simulation time resolution (ms)
seed_NEST = 30 # seed of random number generator in Nest
seed_numpy = 30 # seed of random number generator in numpy
senders = []
spiketimes = []
###############################################################################
# we run the two simulations successively. After each simulation the
# sender ids and spiketimes are stored in a list (``senders``, ``spiketimes``).
for trial in [0, 1]:
# Before we build the network, we reset the simulation kernel to ensure
# that previous NEST simulations in the python shell will not disturb this
# simulation and set the simulation resolution (later defined
# synaptic delays cannot be smaller than the simulation resolution).
nest.ResetKernel()
nest.SetKernelStatus({"resolution": dt})
###############################################################################
# Now we start building the network and create excitatory and inhibitory nodes
# and connect them. According to the connectivity specification, each neuron
# is assigned random KE synapses from the excitatory population and random KI
# synapses from the inhibitory population.
nodes_ex = nest.Create(neuron_model, NE)
nodes_in = nest.Create(neuron_model, NI)
allnodes = nodes_ex + nodes_in
nest.Connect(nodes_ex, allnodes,
conn_spec={'rule': 'fixed_indegree', 'indegree': KE},
syn_spec={'weight': J, 'delay': dt})
nest.Connect(nodes_in, allnodes,
conn_spec={'rule': 'fixed_indegree', 'indegree': KI},
syn_spec={'weight': -g * J, 'delay': dt})
###############################################################################
# Afterwards we create a ``poisson_generator`` that provides spikes (the external
# input) to the neurons until time ``T`` is reached.
# Afterwards a ``dc_generator``, which is also connected to the whole population,
# provides a strong hyperpolarisation step for a short time period ``fade_out``.
#
# The ``fade_out`` period has to last at least twice as long as the simulation
# resolution to suppress the neurons from firing.
ext = nest.Create("poisson_generator",
params={'rate': rate_ext, 'stop': T})
nest.Connect(ext, allnodes,
syn_spec={'weight': Jext, 'delay': dt})
suppr = nest.Create("dc_generator",
params={'amplitude': -1e16, 'start': T,
'stop': T + fade_out})
nest.Connect(suppr, allnodes)
spikedetector = nest.Create("spike_detector")
nest.Connect(allnodes, spikedetector)
###############################################################################
# We then create the ``spike_generator``, which provides the extra spike
# (perturbation).
stimulus = nest.Create("spike_generator")
stimulus.spike_times = []
###############################################################################
# We need to reset the random number generator and the clock of
# the simulation Kernel. In addition, we ensure that there is no spike left in
# the spike detector.
nest.SetKernelStatus({"rng_seeds": [seed_NEST], 'time': 0.0})
spikedetector.n_events = 0
# We assign random initial membrane potentials to all neurons
numpy.random.seed(seed_numpy)
Vms = Vmin + (Vmax - Vmin) * numpy.random.rand(N)
allnodes.V_m = Vms
##############################################################################
# In the second trial, we add an extra input spike at time ``t_stim`` to the
# neuron that fires first after perturbation time ``t_stim``. Thus, we make sure
# that the perturbation is transmitted to the network before it fades away in
# the perturbed neuron. (Single IAF-neurons are not chaotic.)
if trial == 1:
id_stim = [senders[0][spiketimes[0] > t_stim][0]]
nest.Connect(stimulus, nest.NodeCollection(id_stim),
syn_spec={'weight': Jstim, 'delay': dt})
stimulus.spike_times = [t_stim]
# Now we simulate the network and add a fade out period to discard
# remaining spikes.
nest.Simulate(T)
nest.Simulate(fade_out)
# Storing the data.
senders += [spikedetector.get('events', 'senders')]
spiketimes += [spikedetector.get('events', 'times')]
###############################################################################
# We plot the spiking activity of the network (first trial in red, second trial
# in black).
plt.figure(1)
plt.clf()
plt.plot(spiketimes[0], senders[0], 'ro', ms=4.)
plt.plot(spiketimes[1], senders[1], 'ko', ms=2.)
plt.xlabel('time (ms)')
plt.ylabel('neuron id')
plt.xlim((0, T))
plt.ylim((0, N))
plt.show()
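###############################################################################
# Editor's illustrative addition (not part of the original example): a crude
# scalar summary of how far the two trials diverge is the fraction of
# (sender, spike time) pairs that occur in only one of the two trials.
pairs0 = set(zip(senders[0], numpy.round(spiketimes[0], 1)))
pairs1 = set(zip(senders[1], numpy.round(spiketimes[1], 1)))
if pairs0 | pairs1:
    frac_nonshared = len(pairs0 ^ pairs1) / float(len(pairs0 | pairs1))
    print("Fraction of non-shared spikes between the two trials: %.3f"
          % frac_nonshared)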
| gpl-2.0 |
ieguinoa/tools-iuc | tools/fsd/td.py | 17 | 59714 | #!/usr/bin/env python
# Tag distance analysis of SSCSs
#
# Author: Monika Heinzl, Johannes-Kepler University Linz (Austria)
# Contact: [email protected]
#
# Takes at least one TABULAR file with tags before the alignment to the SSCS and
# optionally a second TABULAR file as input. The program produces a plot which shows a histogram of Hamming distances
# separated after family sizes, a family size distribution separated after Hamming distances for all (sample_size=0)
# or a given sample of SSCSs or SSCSs, which form a DCS. In additon, the tool produces HD and FSD plots for the
# or a given sample of SSCSs or SSCSs, which form a DCS. In addition, the tool produces HD and FSD plots for the
# data of the plots. It is also possible to perform the HD analysis with shortened tags with given sizes as input.
# The tool can run on a certain number of processors, which can be defined by the user.
# USAGE: python td.py --inputFile filename --inputName1 filename --sample_size int
#        --only_DCS --subset_tag int --nproc int --minFS int --maxFS int
#        --nr_above_bars --rel_freq --output_tabular outputfile_name_tabular
#        --output_pdf outputfile_name_pdf --output_chimeras_tabular outputfile_name_tabular
import argparse
import operator
import sys
from collections import Counter, defaultdict
from functools import partial
from multiprocessing.pool import Pool
import matplotlib.pyplot as plt
import numpy
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def plotFSDwithHD2(familySizeList1, maximumXFS, minimumXFS, originalCounts,
subtitle, pdf, relative=False, diff=True, rel_freq=False):
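    # Stacked histogram of the family size distribution, separated either by
    # TD (diff=False) or by the (relative) delta TD between the two tag halves
    # (diff=True); rel_freq toggles relative vs. absolute frequencies.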
if diff is False:
colors = ["#e6194b", "#3cb44b", "#ffe119", "#0082c8", "#f58231", "#911eb4"]
labels = ["TD=1", "TD=2", "TD=3", "TD=4", "TD=5-8", "TD>8"]
else:
colors = ["#93A6AB", "#403C14", "#731E41", "#BAB591", "#085B6F", "#E8AA35", "#726C66"]
if relative is True:
labels = ["d=0", "d=0.1", "d=0.2", "d=0.3", "d=0.4", "d=0.5-0.8", "d>0.8"]
else:
labels = ["d=0", "d=1", "d=2", "d=3", "d=4", "d=5-8", "d>8"]
fig = plt.figure(figsize=(6, 7))
ax = fig.add_subplot(111)
plt.subplots_adjust(bottom=0.1)
p1 = numpy.bincount(numpy.concatenate(familySizeList1))
maximumY = numpy.amax(p1)
if len(range(minimumXFS, maximumXFS)) == 0:
range1 = range(minimumXFS - 1, minimumXFS + 2)
else:
range1 = range(0, maximumXFS + 2)
if rel_freq:
w = [numpy.zeros_like(data) + 1. / len(numpy.concatenate(familySizeList1)) for data in familySizeList1]
plt.hist(familySizeList1, label=labels, weights=w, color=colors, stacked=True, rwidth=0.8, alpha=1, align="left", edgecolor="None", bins=range1)
plt.ylabel("Relative Frequency", fontsize=14)
plt.ylim((0, 1.07))
else:
plt.hist(familySizeList1, label=labels, color=colors, stacked=True, rwidth=0.8, alpha=1, align="left", edgecolor="None", bins=range1)
if len(numpy.concatenate(familySizeList1)) != 0:
plt.ylim((0, max(numpy.bincount(numpy.concatenate(familySizeList1))) * 1.1))
plt.ylabel("Absolute Frequency", fontsize=14)
plt.ylim((0, maximumY * 1.2))
plt.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(1.45, 1))
plt.suptitle(subtitle, y=1, x=0.5, fontsize=14)
plt.xlabel("Family size", fontsize=14)
ticks = numpy.arange(0, maximumXFS + 1, 1)
ticks1 = [str(_) for _ in ticks]
if maximumXFS >= 20:
ticks1[len(ticks1) - 1] = ">=20"
plt.xticks(numpy.array(ticks), ticks1)
[l.set_visible(False) for (i, l) in enumerate(ax.get_xticklabels()) if i % 5 != 0]
plt.xlim((0, maximumXFS + 1))
legend = "\nfamily size: \nabsolute frequency: \nrelative frequency: "
plt.text(0.15, -0.08, legend, size=12, transform=plt.gcf().transFigure)
# count = numpy.bincount(originalCounts) # original counts
if max(originalCounts) >= 20:
max_count = ">= 20"
else:
max_count = max(originalCounts)
legend1 = "{}\n{}\n{:.5f}".format(max_count, p1[len(p1) - 1], float(p1[len(p1) - 1]) / sum(p1))
plt.text(0.5, -0.08, legend1, size=12, transform=plt.gcf().transFigure)
legend3 = "singletons\n{:,}\n{:.5f}".format(int(p1[1]), float(p1[1]) / sum(p1))
plt.text(0.7, -0.08, legend3, transform=plt.gcf().transFigure, size=12)
plt.grid(b=True, which='major', color='#424242', linestyle=':')
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plotHDwithFSD(list1, maximumX, minimumX, subtitle, lenTags, pdf, xlabel, relative=False,
nr_above_bars=True, nr_unique_chimeras=0, len_sample=0, rel_freq=False):
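    # Stacked histogram of TDs (or delta TDs), separated by family size
    # classes (FS=1, 2, 3, 4, 5-10, >10); optionally annotates the counts
    # above the bars and reports the number of chimeric families (CF).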
if relative is True:
step = 0.1
else:
step = 1
fig = plt.figure(figsize=(6, 8))
plt.subplots_adjust(bottom=0.1)
p1 = numpy.array([v for k, v in sorted(Counter(numpy.concatenate(list1)).items())])
maximumY = numpy.amax(p1)
if relative is True: # relative difference
bin1 = numpy.arange(-1, maximumX + 0.2, 0.1)
else:
bin1 = maximumX + 1
if rel_freq:
w = [numpy.zeros_like(data) + 1. / len(numpy.concatenate(list1)) for data in list1]
counts = plt.hist(list1, bins=bin1, edgecolor='black', linewidth=1, weights=w,
label=["FS=1", "FS=2", "FS=3", "FS=4", "FS=5-10", "FS>10"], rwidth=0.8,
color=["#808080", "#FFFFCC", "#FFBF00", "#DF0101", "#0431B4", "#86B404"],
stacked=True, alpha=1, align="left", range=(0, maximumX + 1))
plt.ylim((0, 1.07))
plt.ylabel("Relative Frequency", fontsize=14)
bins = counts[1] # width of bins
        counts = numpy.array([float(_) for _ in counts[0][5]])
else:
counts = plt.hist(list1, bins=bin1, edgecolor='black', linewidth=1,
label=["FS=1", "FS=2", "FS=3", "FS=4", "FS=5-10", "FS>10"], rwidth=0.8,
color=["#808080", "#FFFFCC", "#FFBF00", "#DF0101", "#0431B4", "#86B404"],
stacked=True, alpha=1, align="left", range=(0, maximumX + 1))
maximumY = numpy.amax(p1)
plt.ylim((0, maximumY * 1.2))
plt.ylabel("Absolute Frequency", fontsize=14)
bins = counts[1] # width of bins
counts = numpy.array([int(_) for _ in counts[0][5]])
plt.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(1.45, 1))
plt.suptitle(subtitle, y=1, x=0.5, fontsize=14)
plt.xlabel(xlabel, fontsize=14)
plt.grid(b=True, which='major', color='#424242', linestyle=':')
plt.xlim((minimumX - step, maximumX + step))
plt.xticks(numpy.arange(0, maximumX + step, step))
if nr_above_bars:
bin_centers = -0.4 * numpy.diff(bins) + bins[:-1]
for x_label, label in zip(counts, bin_centers): # labels for values
if x_label == 0:
continue
else:
if rel_freq:
plt.annotate("{:,}\n{:.3f}".format(int(round(x_label * len(numpy.concatenate(list1)))),
float(x_label)),
xy=(label, x_label + len(numpy.concatenate(list1)) * 0.0001),
xycoords="data", color="#000066", fontsize=10)
else:
plt.annotate("{:,}\n{:.3f}".format(x_label, float(x_label) / sum(counts)),
xy=(label, x_label + len(numpy.concatenate(list1)) * 0.01),
xycoords="data", color="#000066", fontsize=10)
if nr_unique_chimeras != 0:
if (relative and ((counts[len(counts) - 1] / nr_unique_chimeras) == 2)) or \
(sum(counts) / nr_unique_chimeras) == 2:
legend = "nr. of tags = {:,}\nsample size = {:,}\nnr. of data points = {:,}\nnr. of CF = {:,} ({:,})"\
.format(lenTags, len_sample, len(numpy.concatenate(list1)), nr_unique_chimeras, nr_unique_chimeras * 2)
else:
legend = "nr. of tags = {:,}\nsample size = {:,}\nnr. of data points = {:,}\nnr. of CF = {:,}".format(
lenTags, len_sample, len(numpy.concatenate(list1)), nr_unique_chimeras)
else:
legend = "nr. of tags = {:,}\nsample size = {:,}\nnr. of data points = {:,}".format(
lenTags, len_sample, len(numpy.concatenate(list1)))
plt.text(0.14, -0.07, legend, size=12, transform=plt.gcf().transFigure)
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
plt.clf()
def plotHDwithDCS(list1, maximumX, minimumX, subtitle, lenTags, pdf, xlabel, relative=False,
nr_above_bars=True, nr_unique_chimeras=0, len_sample=0, rel_freq=False):
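    # Stacked histogram of the TDs of chimeric families, separated into DCS
    # and single SSCS reads (ab, ba).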
step = 1
fig = plt.figure(figsize=(6, 8))
plt.subplots_adjust(bottom=0.1)
p1 = numpy.array([v for k, v in sorted(Counter(numpy.concatenate(list1)).items())])
maximumY = numpy.amax(p1)
bin1 = maximumX + 1
if rel_freq:
w = [numpy.zeros_like(data) + 1. / len(numpy.concatenate(list1)) for data in list1]
counts = plt.hist(list1, bins=bin1, edgecolor='black', linewidth=1, weights=w,
label=["DCS", "ab", "ba"], rwidth=0.8, color=["#FF0000", "#5FB404", "#FFBF00"],
stacked=True, alpha=1, align="left", range=(0, maximumX + 1))
plt.ylim((0, 1.07))
plt.ylabel("Relative Frequency", fontsize=14)
bins = counts[1] # width of bins
counts = numpy.array([float(_) for _ in counts[0][2]])
else:
counts = plt.hist(list1, bins=bin1, edgecolor='black', linewidth=1,
label=["DCS", "ab", "ba"], rwidth=0.8, color=["#FF0000", "#5FB404", "#FFBF00"],
stacked=True, alpha=1, align="left", range=(0, maximumX + 1))
plt.ylim((0, maximumY * 1.2))
plt.ylabel("Absolute Frequency", fontsize=14)
bins = counts[1] # width of bins
counts = numpy.array([int(_) for _ in counts[0][2]])
plt.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(1.45, 1))
plt.suptitle(subtitle, y=1, x=0.5, fontsize=14)
plt.xlabel(xlabel, fontsize=14)
plt.grid(b=True, which='major', color='#424242', linestyle=':')
plt.xlim((minimumX - step, maximumX + step))
plt.xticks(numpy.arange(0, maximumX + step, step))
if nr_above_bars:
bin_centers = -0.4 * numpy.diff(bins) + bins[:-1]
for x_label, label in zip(counts, bin_centers): # labels for values
if x_label == 0:
continue
else:
if rel_freq:
plt.annotate("{:,}\n{:.3f}".format(int(round(x_label * len(numpy.concatenate(list1)))),
float(x_label)),
xy=(label, x_label + len(numpy.concatenate(list1)) * 0.0001),
xycoords="data", color="#000066", fontsize=10)
else:
plt.annotate("{:,}\n{:.3f}".format(x_label, float(x_label) / sum(counts)),
xy=(label, x_label + len(numpy.concatenate(list1)) * 0.01),
xycoords="data", color="#000066", fontsize=10)
if nr_unique_chimeras != 0:
if (sum(counts) / nr_unique_chimeras) == 2:
legend = "nr. of tags = {:,}\nsample size = {:,}\nnr. of data points = {:,}\nnr. of CF = {:,} ({:,})".\
format(lenTags, len_sample, len(numpy.concatenate(list1)), nr_unique_chimeras, nr_unique_chimeras * 2)
else:
legend = "nr. of tags = {:,}\nsample size = {:,}\nnr. of data points = {:,}\nnr. of CF = {:,}".format(
lenTags, len_sample, len(numpy.concatenate(list1)), nr_unique_chimeras)
else:
legend = "nr. of tags = {:,}\nsample size = {:,}\nnr. of data points = {:,}".format(
lenTags, len_sample, len(numpy.concatenate(list1)))
plt.text(0.14, -0.07, legend, size=12, transform=plt.gcf().transFigure)
legend2 = "SSCS ab = {:,} ({:.5f})\nSSCS ba = {:,} ({:.5f})\nDCS = {:,} ({:.5f})".format(
len(list1[1]), len(list1[1]) / float(nr_unique_chimeras),
len(list1[2]), len(list1[2]) / float(nr_unique_chimeras),
len(list1[0]), len(list1[0]) / float(nr_unique_chimeras))
plt.text(0.6, -0.047, legend2, size=12, transform=plt.gcf().transFigure)
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
plt.clf()
def plotHDwithinSeq(sum1, sum1min, sum2, sum2min, min_value, lenTags, pdf, len_sample, rel_freq=False):
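    # Side-by-side histograms of the partial TDs within tags (TD a.min,
    # TD b.max, TD b.min, TD a.max) and of their sums.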
fig = plt.figure(figsize=(6, 8))
plt.subplots_adjust(bottom=0.1)
ham_partial = [sum1, sum1min, sum2, sum2min, numpy.array(min_value)] # new hd within tags
maximumX = numpy.amax(numpy.concatenate(ham_partial))
minimumX = numpy.amin(numpy.concatenate(ham_partial))
if len(range(minimumX, maximumX)) == 0:
range1 = minimumX
else:
range1 = range(minimumX, maximumX + 2)
if rel_freq:
w = [numpy.zeros_like(data) + 1. / len(data) for data in ham_partial]
plt.hist(ham_partial, align="left", rwidth=0.8, stacked=False, weights=w,
label=["TD a.min", "TD b.max", "TD b.min", "TD a.max", "TD a.min + b.max,\nTD a.max + b.min"],
bins=range1, color=["#58ACFA", "#0404B4", "#FE642E", "#B40431", "#585858"],
edgecolor='black', linewidth=1)
plt.ylabel("Relative Frequency", fontsize=14)
plt.ylim(0, 1.07)
else:
plt.hist(ham_partial, align="left", rwidth=0.8, stacked=False,
label=["TD a.min", "TD b.max", "TD b.min", "TD a.max", "TD a.min + b.max,\nTD a.max + b.min"],
bins=range1, color=["#58ACFA", "#0404B4", "#FE642E", "#B40431", "#585858"],
edgecolor='black', linewidth=1)
plt.ylabel("Absolute Frequency", fontsize=14)
plt.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(1.6, 1))
plt.suptitle('Tag distances within tags', fontsize=14)
plt.xlabel("TD", fontsize=14)
plt.grid(b=True, which='major', color='#424242', linestyle=':')
plt.xlim((minimumX - 1, maximumX + 1))
plt.xticks(numpy.arange(0, maximumX + 1, 1.0))
legend = "nr. of tags = {:,}\nsample size = {:,}\nnr. of data points = {:,}".format(
lenTags, len_sample, len(numpy.concatenate(ham_partial)))
plt.text(0.14, -0.05, legend, size=12, transform=plt.gcf().transFigure)
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
plt.clf()
def createTableFSD2(list1, diff=True):
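    # Build the family size summary table for the tabular output: one row per
    # family size, one column per TD class (diff=False) or delta-TD class
    # (diff=True), plus row and column sums.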
selfAB = numpy.concatenate(list1)
uniqueFS = numpy.unique(selfAB)
nr = numpy.arange(0, len(uniqueFS), 1)
if diff is False:
count = numpy.zeros((len(uniqueFS), 6))
else:
count = numpy.zeros((len(uniqueFS), 7))
state = 1
for i in list1:
counts = list(Counter(i).items())
hd = [item[0] for item in counts]
c = [item[1] for item in counts]
table = numpy.column_stack((hd, c))
if len(table) == 0:
state = state + 1
continue
else:
if state == 1:
for k, l in zip(uniqueFS, nr):
for j in table:
if j[0] == uniqueFS[l]:
count[l, 0] = j[1]
if state == 2:
for k, l in zip(uniqueFS, nr):
for j in table:
if j[0] == uniqueFS[l]:
count[l, 1] = j[1]
if state == 3:
for k, l in zip(uniqueFS, nr):
for j in table:
if j[0] == uniqueFS[l]:
count[l, 2] = j[1]
if state == 4:
for k, l in zip(uniqueFS, nr):
for j in table:
if j[0] == uniqueFS[l]:
count[l, 3] = j[1]
if state == 5:
for k, l in zip(uniqueFS, nr):
for j in table:
if j[0] == uniqueFS[l]:
count[l, 4] = j[1]
if state == 6:
for k, l in zip(uniqueFS, nr):
for j in table:
if j[0] == uniqueFS[l]:
count[l, 5] = j[1]
if state == 7:
for k, l in zip(uniqueFS, nr):
for j in table:
if j[0] == uniqueFS[l]:
count[l, 6] = j[1]
state = state + 1
sumRow = count.sum(axis=1)
sumCol = count.sum(axis=0)
uniqueFS = uniqueFS.astype(str)
if uniqueFS[len(uniqueFS) - 1] == "20":
uniqueFS[len(uniqueFS) - 1] = ">20"
first = ["FS={}".format(i) for i in uniqueFS]
final = numpy.column_stack((first, count, sumRow))
return (final, sumCol)
def createFileFSD2(summary, sumCol, overallSum, output_file, name, sep, rel=False, diff=True):
output_file.write(name)
output_file.write("\n")
if diff is False:
output_file.write("{}TD=1{}TD=2{}TD=3{}TD=4{}TD=5-8{}TD>8{}sum{}\n".format(
sep, sep, sep, sep, sep, sep, sep, sep))
else:
if rel is False:
output_file.write("{}diff=0{}diff=1{}diff=2{}diff=3{}diff=4{}diff=5-8{}diff>8{}sum{}\n".format(
sep, sep, sep, sep, sep, sep, sep, sep, sep))
else:
output_file.write("{}diff=0{}diff=0.1{}diff=0.2{}diff=0.3{}diff=0.4{}diff=0.5-0.8{}diff>0.8{}sum{}\n".
format(sep, sep, sep, sep, sep, sep, sep, sep, sep))
for item in summary:
for nr in item:
if "FS" not in nr and "diff" not in nr:
nr = nr.astype(float)
nr = nr.astype(int)
output_file.write("{}{}".format(nr, sep))
output_file.write("\n")
output_file.write("sum{}".format(sep))
sumCol = map(int, sumCol)
for el in sumCol:
output_file.write("{}{}".format(el, sep))
output_file.write("{}{}".format(overallSum.astype(int), sep))
output_file.write("\n\n")
def createTableHD(list1, row_label):
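    # Build the TD summary table: one row per TD (or delta TD), one column per
    # family size class (FS=1, 2, 3, 4, 5-10, >10), plus row and column sums.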
selfAB = numpy.concatenate(list1)
uniqueHD = numpy.unique(selfAB)
nr = numpy.arange(0, len(uniqueHD), 1)
count = numpy.zeros((len(uniqueHD), 6))
state = 1
for i in list1:
counts = list(Counter(i).items())
hd = [item[0] for item in counts]
c = [item[1] for item in counts]
table = numpy.column_stack((hd, c))
if len(table) == 0:
state = state + 1
continue
else:
if state == 1:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 0] = j[1]
if state == 2:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 1] = j[1]
if state == 3:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 2] = j[1]
if state == 4:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 3] = j[1]
if state == 5:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 4] = j[1]
if state == 6:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 5] = j[1]
state = state + 1
sumRow = count.sum(axis=1)
sumCol = count.sum(axis=0)
first = ["{}{}".format(row_label, i) for i in uniqueHD]
final = numpy.column_stack((first, count, sumRow))
return (final, sumCol)
def createTableHDwithTags(list1):
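    # Build the summary table of the partial TDs within tags: one row per TD,
    # one column per category (TD a.min, TD b.max, TD b.min, TD a.max and
    # their sums), plus row and column sums.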
selfAB = numpy.concatenate(list1)
uniqueHD = numpy.unique(selfAB)
nr = numpy.arange(0, len(uniqueHD), 1)
count = numpy.zeros((len(uniqueHD), 5))
state = 1
for i in list1:
counts = list(Counter(i).items())
hd = [item[0] for item in counts]
c = [item[1] for item in counts]
table = numpy.column_stack((hd, c))
if len(table) == 0:
state = state + 1
continue
else:
if state == 1:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 0] = j[1]
if state == 2:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 1] = j[1]
if state == 3:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 2] = j[1]
if state == 4:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 3] = j[1]
if state == 5:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 4] = j[1]
state = state + 1
sumRow = count.sum(axis=1)
sumCol = count.sum(axis=0)
first = ["TD={}".format(i) for i in uniqueHD]
final = numpy.column_stack((first, count, sumRow))
return (final, sumCol)
def createTableHDwithDCS(list1):
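    # Build the summary table of chimeric-family TDs: one row per TD, one
    # column each for DCS, SSCS ab and SSCS ba, plus row and column sums.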
selfAB = numpy.concatenate(list1)
uniqueHD = numpy.unique(selfAB)
nr = numpy.arange(0, len(uniqueHD), 1)
count = numpy.zeros((len(uniqueHD), len(list1)))
state = 1
for i in list1:
counts = list(Counter(i).items())
hd = [item[0] for item in counts]
c = [item[1] for item in counts]
table = numpy.column_stack((hd, c))
if len(table) == 0:
state = state + 1
continue
else:
if state == 1:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 0] = j[1]
if state == 2:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 1] = j[1]
if state == 3:
for k, l in zip(uniqueHD, nr):
for j in table:
if j[0] == uniqueHD[l]:
count[l, 2] = j[1]
state = state + 1
sumRow = count.sum(axis=1)
sumCol = count.sum(axis=0)
first = ["TD={}".format(i) for i in uniqueHD]
final = numpy.column_stack((first, count, sumRow))
return (final, sumCol)
def createFileHD(summary, sumCol, overallSum, output_file, name, sep):
output_file.write(name)
output_file.write("\n")
output_file.write("{}FS=1{}FS=2{}FS=3{}FS=4{}FS=5-10{}FS>10{}sum{}\n".format(
sep, sep, sep, sep, sep, sep, sep, sep))
for item in summary:
for nr in item:
if "TD" not in nr and "diff" not in nr:
nr = nr.astype(float)
nr = nr.astype(int)
output_file.write("{}{}".format(nr, sep))
output_file.write("\n")
output_file.write("sum{}".format(sep))
sumCol = map(int, sumCol)
for el in sumCol:
output_file.write("{}{}".format(el, sep))
output_file.write("{}{}".format(overallSum.astype(int), sep))
output_file.write("\n\n")
def createFileHDwithDCS(summary, sumCol, overallSum, output_file, name, sep):
output_file.write(name)
output_file.write("\n")
output_file.write("{}DCS{}SSCS ab{}SSCS ba{}sum{}\n".format(sep, sep, sep, sep, sep))
for item in summary:
for nr in item:
if "TD" not in nr:
nr = nr.astype(float)
nr = nr.astype(int)
output_file.write("{}{}".format(nr, sep))
output_file.write("\n")
output_file.write("sum{}".format(sep))
sumCol = map(int, sumCol)
for el in sumCol:
output_file.write("{}{}".format(el, sep))
output_file.write("{}{}".format(overallSum.astype(int), sep))
output_file.write("\n\n")
def createFileHDwithinTag(summary, sumCol, overallSum, output_file, name, sep):
output_file.write(name)
output_file.write("\n")
output_file.write("{}TD a.min{}TD b.max{}TD b.min{}TD a.max{}TD a.min + b.max, TD a.max + b.min{}sum{}\n".format(sep, sep, sep, sep, sep, sep, sep))
for item in summary:
for nr in item:
if "TD" not in nr:
nr = nr.astype(float)
nr = nr.astype(int)
output_file.write("{}{}".format(nr, sep))
output_file.write("\n")
output_file.write("sum{}".format(sep))
sumCol = map(int, sumCol)
for el in sumCol:
output_file.write("{}{}".format(el, sep))
output_file.write("{}{}".format(overallSum.astype(int), sep))
output_file.write("\n\n")
def hamming(array1, array2):
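    # For each tag in array1, compute the minimum Hamming distance (tag
    # distance, TD) to any non-identical tag in array2.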
res = 99 * numpy.ones(len(array1))
i = 0
array2 = numpy.unique(array2) # remove duplicate sequences to decrease running time
for a in array1:
dist = numpy.array([sum(map(operator.ne, a, b)) for b in array2]) # fastest
res[i] = numpy.amin(dist[dist > 0]) # pick min distance greater than zero
i += 1
return res
def hamming_difference(array1, array2, mate_b):
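    # Chimera analysis on half-tags: each tag is split into halves a and b.
    # For every sample tag, the minimum TD of one half against all other tags
    # is determined first; among the tags giving that minimum, the maximum TD
    # of the complementary half is taken. The function returns the absolute
    # and relative differences of the two partial TDs and records tags where
    # one half is identical (TD=0) as chimera candidates. mate_b selects
    # whether the search starts with half a (False) or half b (True).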
array2 = numpy.unique(array2) # remove duplicate sequences to decrease running time
array1_half = numpy.array([i[0:int(len(i) / 2)] for i in array1]) # mate1 part1
array1_half2 = numpy.array([i[int(len(i) / 2):len(i)] for i in array1]) # mate1 part 2
array2_half = numpy.array([i[0:int(len(i) / 2)] for i in array2]) # mate2 part1
array2_half2 = numpy.array([i[int(len(i) / 2):len(i)] for i in array2]) # mate2 part2
diff11 = []
relativeDiffList = []
ham1 = []
ham2 = []
ham1min = []
ham2min = []
min_valueList = []
min_tagsList = []
diff11_zeros = []
min_tagsList_zeros = []
max_tag_list = []
i = 0 # counter, only used to see how many HDs of tags were already calculated
if mate_b is False: # HD calculation for all a's
half1_mate1 = array1_half
half2_mate1 = array1_half2
half1_mate2 = array2_half
half2_mate2 = array2_half2
elif mate_b is True: # HD calculation for all b's
half1_mate1 = array1_half2
half2_mate1 = array1_half
half1_mate2 = array2_half2
half2_mate2 = array2_half
for a, b, tag in zip(half1_mate1, half2_mate1, array1):
# exclude identical tag from array2, to prevent comparison to itself
sameTag = numpy.where(array2 == tag)[0]
indexArray2 = numpy.arange(0, len(array2), 1)
index_withoutSame = numpy.delete(indexArray2, sameTag) # delete identical tag from the data
# all tags without identical tag
array2_half_withoutSame = half1_mate2[index_withoutSame]
array2_half2_withoutSame = half2_mate2[index_withoutSame]
array2_withoutSame = array2[index_withoutSame] # whole tag (=not splitted into 2 halfs)
# calculate HD of "a" in the tag to all "a's" or "b" in the tag to all "b's"
dist = numpy.array([sum(map(operator.ne, a, c)) for c in
array2_half_withoutSame])
min_index = numpy.where(dist == dist.min())[0] # get index of min HD
min_value = dist.min()
# get all "b's" of the tag or all "a's" of the tag with minimum HD
min_tag_half2 = array2_half2_withoutSame[min_index]
min_tag_array2 = array2_withoutSame[min_index] # get whole tag with min HD
dist_second_half = numpy.array([sum(map(operator.ne, b, e)) for e in
min_tag_half2]) # calculate HD of "b" to all "b's" or "a" to all "a's"
max_value = dist_second_half.max()
max_index = numpy.where(dist_second_half == dist_second_half.max())[0] # get index of max HD
max_tag = min_tag_array2[max_index]
# for d, d2 in zip(min_value, max_value):
if mate_b is True: # half2, corrects the variable of the HD from both halfs if it is a or b
ham2.append(min_value)
ham2min.append(max_value)
else: # half1, corrects the variable of the HD from both halfs if it is a or b
ham1.append(min_value)
ham1min.append(max_value)
min_valueList.append(min_value + max_value)
min_tagsList.append(tag)
difference1 = abs(min_value - max_value)
diff11.append(difference1)
rel_difference = round(float(difference1) / (min_value + max_value), 1)
relativeDiffList.append(rel_difference)
# tags which have identical parts:
if min_value == 0 or max_value == 0:
min_tagsList_zeros.append(numpy.array(tag))
difference1_zeros = abs(min_value - max_value) # td of non-identical part
diff11_zeros.append(difference1_zeros)
max_tag_list.append(numpy.array(max_tag))
else:
min_tagsList_zeros.append(None)
diff11_zeros.append(None)
max_tag_list.append(None)
i += 1
return ([diff11, ham1, ham2, min_valueList, min_tagsList, relativeDiffList, diff11_zeros,
min_tagsList_zeros, ham1min, ham2min, max_tag_list])
def readFileReferenceFree(file):
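    # Read the tab-separated tag file and return the family sizes (first
    # column) as integers together with the full data array.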
with open(file, 'r') as dest_f:
data_array = numpy.genfromtxt(dest_f, skip_header=0, delimiter='\t', comments='#', dtype=str)
integers = numpy.array(data_array[:, 0]).astype(int)
return (integers, data_array)
def hammingDistanceWithFS(fs, ham):
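    # Group the TDs by family size classes (FS=1, 2, 3, 4, 5-10, >10) and
    # return the grouped data together with the maximum and minimum TD.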
fs = numpy.asarray(fs)
maximum = max(ham)
minimum = min(ham)
ham = numpy.asarray(ham)
singletons = numpy.where(fs == 1)[0]
data = ham[singletons]
hd2 = numpy.where(fs == 2)[0]
data2 = ham[hd2]
hd3 = numpy.where(fs == 3)[0]
data3 = ham[hd3]
hd4 = numpy.where(fs == 4)[0]
data4 = ham[hd4]
hd5 = numpy.where((fs >= 5) & (fs <= 10))[0]
data5 = ham[hd5]
hd6 = numpy.where(fs > 10)[0]
data6 = ham[hd6]
list1 = [data, data2, data3, data4, data5, data6]
return (list1, maximum, minimum)
def familySizeDistributionWithHD(fs, ham, diff=False, rel=True):
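    # Group the family sizes by TD classes (or delta-TD classes when diff=True;
    # rel=True uses the relative delta classes); family sizes above 19 are
    # capped at 20 for plotting.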
hammingDistances = numpy.unique(ham)
fs = numpy.asarray(fs)
ham = numpy.asarray(ham)
bigFamilies2 = numpy.where(fs > 19)[0]
if len(bigFamilies2) != 0:
fs[bigFamilies2] = 20
maximum = max(fs)
minimum = min(fs)
if diff is True:
hd0 = numpy.where(ham == 0)[0]
data0 = fs[hd0]
if rel is True:
hd1 = numpy.where(ham == 0.1)[0]
else:
hd1 = numpy.where(ham == 1)[0]
data = fs[hd1]
if rel is True:
hd2 = numpy.where(ham == 0.2)[0]
else:
hd2 = numpy.where(ham == 2)[0]
data2 = fs[hd2]
if rel is True:
hd3 = numpy.where(ham == 0.3)[0]
else:
hd3 = numpy.where(ham == 3)[0]
data3 = fs[hd3]
if rel is True:
hd4 = numpy.where(ham == 0.4)[0]
else:
hd4 = numpy.where(ham == 4)[0]
data4 = fs[hd4]
if rel is True:
hd5 = numpy.where((ham >= 0.5) & (ham <= 0.8))[0]
else:
hd5 = numpy.where((ham >= 5) & (ham <= 8))[0]
data5 = fs[hd5]
if rel is True:
hd6 = numpy.where(ham > 0.8)[0]
else:
hd6 = numpy.where(ham > 8)[0]
data6 = fs[hd6]
if diff is True:
list1 = [data0, data, data2, data3, data4, data5, data6]
else:
list1 = [data, data2, data3, data4, data5, data6]
return (list1, hammingDistances, maximum, minimum)
def hammingDistanceWithDCS(minHD_tags_zeros, diff_zeros, data_array):
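    # For the chimeric tags (one half with TD=0), split the TDs of the
    # non-identical half into tags occurring twice in the dataset (ab + ba,
    # i.e. DCS) and tags occurring only once (single SSCS ab or ba).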
diff_zeros = numpy.array(diff_zeros)
maximum = numpy.amax(diff_zeros)
minimum = numpy.amin(diff_zeros)
minHD_tags_zeros = numpy.array(minHD_tags_zeros)
idx = numpy.concatenate([numpy.where(data_array[:, 1] == i)[0] for i in minHD_tags_zeros])
subset_data = data_array[idx, :]
seq = numpy.array(subset_data[:, 1])
# find all unique tags and get the indices for ALL tags, but only once
u, index_unique, c = numpy.unique(numpy.array(seq), return_counts=True, return_index=True)
DCS_tags = u[c == 2]
rest_tags = u[c == 1]
dcs = numpy.repeat("DCS", len(DCS_tags))
idx_sscs = numpy.concatenate([numpy.where(subset_data[:, 1] == i)[0] for i in rest_tags])
sscs = subset_data[idx_sscs, 2]
all_tags = numpy.column_stack((numpy.concatenate((DCS_tags, subset_data[idx_sscs, 1])),
numpy.concatenate((dcs, sscs))))
hd_DCS = []
ab_SSCS = []
ba_SSCS = []
for i in range(len(all_tags)):
tag = all_tags[i, :]
hd = diff_zeros[numpy.where(minHD_tags_zeros == tag[0])[0]]
if tag[1] == "DCS":
hd_DCS.append(hd)
elif tag[1] == "ab":
ab_SSCS.append(hd)
elif tag[1] == "ba":
ba_SSCS.append(hd)
if len(hd_DCS) != 0:
hd_DCS = numpy.concatenate(hd_DCS)
if len(ab_SSCS) != 0:
ab_SSCS = numpy.concatenate(ab_SSCS)
if len(ba_SSCS) != 0:
ba_SSCS = numpy.concatenate(ba_SSCS)
list1 = [hd_DCS, ab_SSCS, ba_SSCS] # list for plotting
return (list1, maximum, minimum)
def make_argparser():
parser = argparse.ArgumentParser(description='Tag distance analysis of duplex sequencing data')
parser.add_argument('--inputFile',
help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName1')
parser.add_argument('--sample_size', default=1000, type=int,
help='Sample size of Tag distance analysis.')
parser.add_argument('--subset_tag', default=0, type=int,
help='The tag is shortened to the given number.')
parser.add_argument('--nproc', default=4, type=int,
help='The tool runs with the given number of processors.')
parser.add_argument('--only_DCS', action="store_false",
help='Only tags of the DCSs are included in the HD analysis')
parser.add_argument('--minFS', default=1, type=int,
help='Only tags, which have a family size greater or equal than specified, '
'are included in the HD analysis')
parser.add_argument('--maxFS', default=0, type=int,
help='Only tags, which have a family size smaller or equal than specified, '
'are included in the HD analysis')
parser.add_argument('--nr_above_bars', action="store_true",
help='If False, values above bars in the histograms are removed')
parser.add_argument('--rel_freq', action="store_false",
help='If True, the relative frequencies are displayed.')
parser.add_argument('--output_tabular', default="data.tabular", type=str,
help='Name of the tabular file.')
parser.add_argument('--output_pdf', default="data.pdf", type=str,
help='Name of the pdf file.')
parser.add_argument('--output_chimeras_tabular', default="data.tabular", type=str,
help='Name of the tabular file with all chimeric tags.')
return parser
def Hamming_Distance_Analysis(argv):
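    # Main driver: parse the command line, read and filter the tag file, draw
    # a sample of tags, compute the tag distances in parallel, and write the
    # plots (PDF), the summary tables and the list of chimeric tags (tabular).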
parser = make_argparser()
args = parser.parse_args(argv[1:])
file1 = args.inputFile
name1 = args.inputName1
index_size = args.sample_size
title_savedFile_pdf = args.output_pdf
title_savedFile_csv = args.output_tabular
output_chimeras_tabular = args.output_chimeras_tabular
onlyDuplicates = args.only_DCS
rel_freq = args.rel_freq
minFS = args.minFS
maxFS = args.maxFS
nr_above_bars = args.nr_above_bars
subset = args.subset_tag
nproc = args.nproc
sep = "\t"
# input checks
if index_size < 0:
print("index_size is a negative integer.")
exit(2)
if nproc <= 0:
print("nproc is smaller or equal zero")
exit(3)
if subset < 0:
print("subset_tag is smaller or equal zero.")
exit(5)
# PLOT
plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['patch.edgecolor'] = "#000000"
plt.rc('figure', figsize=(11.69, 8.27)) # A4 format
name1 = name1.split(".tabular")[0]
with open(title_savedFile_csv, "w") as output_file, PdfPages(title_savedFile_pdf) as pdf:
print("dataset: ", name1)
integers, data_array = readFileReferenceFree(file1)
data_array = numpy.array(data_array)
print("total nr of tags:", len(data_array))
# filter tags out which contain any other character than ATCG
valid_bases = ["A", "T", "G", "C"]
tagsToDelete = []
for idx, t in enumerate(data_array[:, 1]):
for char in t:
if char not in valid_bases:
tagsToDelete.append(idx)
break
if len(tagsToDelete) != 0: # delete tags with N in the tag from data
print("nr of tags with any other character than A, T, C, G:", len(tagsToDelete),
float(len(tagsToDelete)) / len(data_array))
index_whole_array = numpy.arange(0, len(data_array), 1)
index_withoutN_inTag = numpy.delete(index_whole_array, tagsToDelete)
data_array = data_array[index_withoutN_inTag, :]
integers = integers[index_withoutN_inTag]
print("total nr of filtered tags:", len(data_array))
int_f = numpy.array(data_array[:, 0]).astype(int)
data_array = data_array[numpy.where(int_f >= minFS)]
integers = integers[integers >= minFS]
# select family size for tags
if maxFS > 0:
int_f2 = numpy.array(data_array[:, 0]).astype(int)
data_array = data_array[numpy.where(int_f2 <= maxFS)]
integers = integers[integers <= maxFS]
if onlyDuplicates is True:
tags = data_array[:, 2]
seq = data_array[:, 1]
# find all unique tags and get the indices for ALL tags, but only once
u, index_unique, c = numpy.unique(numpy.array(seq), return_counts=True, return_index=True)
d = u[c == 2]
# get family sizes, tag for duplicates
duplTags_double = integers[numpy.in1d(seq, d)]
duplTags = duplTags_double[0::2] # ab of DCS
duplTagsBA = duplTags_double[1::2] # ba of DCS
duplTags_tag = tags[numpy.in1d(seq, d)][0::2] # ab
duplTags_seq = seq[numpy.in1d(seq, d)][0::2] # ab - tags
if minFS > 1:
duplTags_tag = duplTags_tag[(duplTags >= minFS) & (duplTagsBA >= minFS)]
duplTags_seq = duplTags_seq[(duplTags >= minFS) & (duplTagsBA >= minFS)]
duplTags = duplTags[(duplTags >= minFS) & (duplTagsBA >= minFS)] # ab+ba with FS>=3
data_array = numpy.column_stack((duplTags, duplTags_seq))
data_array = numpy.column_stack((data_array, duplTags_tag))
integers = numpy.array(data_array[:, 0]).astype(int)
print("DCS in whole dataset", len(data_array))
print("min FS", min(integers))
print("max FS", max(integers))
# HD analysis for a subset of the tag
if subset > 0:
tag1 = numpy.array([i[0:int(len(i) / 2)] for i in data_array[:, 1]])
tag2 = numpy.array([i[int(len(i) / 2):len(i)] for i in data_array[:, 1]])
flanking_region_float = float((len(tag1[0]) - subset)) / 2
flanking_region = int(flanking_region_float)
if flanking_region_float % 2 == 0:
tag1_shorten = numpy.array([i[flanking_region:len(i) - flanking_region] for i in tag1])
tag2_shorten = numpy.array([i[flanking_region:len(i) - flanking_region] for i in tag2])
else:
flanking_region_rounded = int(round(flanking_region, 1))
flanking_region_rounded_end = len(tag1[0]) - subset - flanking_region_rounded
tag1_shorten = numpy.array(
[i[flanking_region:len(i) - flanking_region_rounded_end] for i in tag1])
tag2_shorten = numpy.array(
[i[flanking_region:len(i) - flanking_region_rounded_end] for i in tag2])
data_array_tag = numpy.array([i + j for i, j in zip(tag1_shorten, tag2_shorten)])
data_array = numpy.column_stack((data_array[:, 0], data_array_tag, data_array[:, 2]))
print("length of tag= ", len(data_array[0, 1]))
# select sample: if no size given --> all vs. all comparison
if index_size == 0:
result = numpy.arange(0, len(data_array), 1)
else:
numpy.random.shuffle(data_array)
unique_tags, unique_indices = numpy.unique(data_array[:, 1], return_index=True) # get only unique tags
result = numpy.random.choice(unique_indices, size=index_size,
replace=False) # array of random sequences of size=index.size
# comparison random tags to whole dataset
result1 = data_array[result, 1] # random tags
result2 = data_array[:, 1] # all tags
print("sample size= ", len(result1))
# HD analysis of whole tag
proc_pool = Pool(nproc)
chunks_sample = numpy.array_split(result1, nproc)
ham = proc_pool.map(partial(hamming, array2=result2), chunks_sample)
proc_pool.close()
proc_pool.join()
ham = numpy.concatenate(ham).astype(int)
# with open("HD_whole dataset_{}.txt".format(app_f), "w") as output_file1:
# for h, tag in zip(ham, result1):
# output_file1.write("{}\t{}\n".format(tag, h))
proc_pool_b = Pool(nproc)
diff_list_a = proc_pool_b.map(partial(hamming_difference, array2=result2, mate_b=False), chunks_sample)
diff_list_b = proc_pool_b.map(partial(hamming_difference, array2=result2, mate_b=True), chunks_sample)
proc_pool_b.close()
proc_pool_b.join()
HDhalf1 = numpy.concatenate((numpy.concatenate([item[1] for item in diff_list_a]),
numpy.concatenate([item_b[1] for item_b in diff_list_b]))).astype(int)
HDhalf2 = numpy.concatenate((numpy.concatenate([item[2] for item in diff_list_a]),
numpy.concatenate([item_b[2] for item_b in diff_list_b]))).astype(int)
minHDs = numpy.concatenate((numpy.concatenate([item[3] for item in diff_list_a]),
numpy.concatenate([item_b[3] for item_b in diff_list_b]))).astype(int)
HDhalf1min = numpy.concatenate((numpy.concatenate([item[8] for item in diff_list_a]),
numpy.concatenate([item_b[8] for item_b in diff_list_b]))).astype(int)
HDhalf2min = numpy.concatenate((numpy.concatenate([item[9] for item in diff_list_a]),
numpy.concatenate([item_b[9] for item_b in diff_list_b]))).astype(int)
rel_Diff1 = numpy.concatenate([item[5] for item in diff_list_a])
rel_Diff2 = numpy.concatenate([item[5] for item in diff_list_b])
diff1 = numpy.concatenate([item[0] for item in diff_list_a])
diff2 = numpy.concatenate([item[0] for item in diff_list_b])
diff_zeros1 = numpy.concatenate([item[6] for item in diff_list_a])
diff_zeros2 = numpy.concatenate([item[6] for item in diff_list_b])
minHD_tags = numpy.concatenate([item[4] for item in diff_list_a])
minHD_tags_zeros1 = numpy.concatenate([item[7] for item in diff_list_a])
minHD_tags_zeros2 = numpy.concatenate([item[7] for item in diff_list_b])
chimera_tags1 = sum([item[10] for item in diff_list_a], [])
chimera_tags2 = sum([item[10] for item in diff_list_b], [])
rel_Diff = []
diff_zeros = []
minHD_tags_zeros = []
diff = []
chimera_tags = []
for d1, d2, rel1, rel2, zeros1, zeros2, tag1, tag2, ctag1, ctag2 in \
zip(diff1, diff2, rel_Diff1, rel_Diff2, diff_zeros1, diff_zeros2, minHD_tags_zeros1, minHD_tags_zeros2,
chimera_tags1, chimera_tags2):
relatives = numpy.array([rel1, rel2])
absolutes = numpy.array([d1, d2])
max_idx = numpy.argmax(relatives)
rel_Diff.append(relatives[max_idx])
diff.append(absolutes[max_idx])
if all(i is not None for i in [zeros1, zeros2]):
diff_zeros.append(max(zeros1, zeros2))
minHD_tags_zeros.append(str(tag1))
tags = [ctag1, ctag2]
chimera_tags.append(tags)
elif zeros1 is not None and zeros2 is None:
diff_zeros.append(zeros1)
minHD_tags_zeros.append(str(tag1))
chimera_tags.append(ctag1)
elif zeros1 is None and zeros2 is not None:
diff_zeros.append(zeros2)
minHD_tags_zeros.append(str(tag2))
chimera_tags.append(ctag2)
chimera_tags_new = chimera_tags
data_chimeraAnalysis = numpy.column_stack((minHD_tags_zeros, chimera_tags_new))
checked_tags = []
stat_maxTags = []
with open(output_chimeras_tabular, "w") as output_file1:
output_file1.write("chimera tag\tfamily size, read direction\tsimilar tag with TD=0\n")
for i in range(len(data_chimeraAnalysis)):
tag1 = data_chimeraAnalysis[i, 0]
info_tag1 = data_array[data_array[:, 1] == tag1, :]
fs_tag1 = ["{} {}".format(t[0], t[2]) for t in info_tag1]
if tag1 in checked_tags: # skip tag if already written to file
continue
sample_half_a = tag1[0:int(len(tag1) / 2)]
sample_half_b = tag1[int(len(tag1) / 2):len(tag1)]
max_tags = data_chimeraAnalysis[i, 1]
if len(max_tags) > 1 and len(max_tags) != len(data_array[0, 1]) and type(max_tags) is not numpy.ndarray:
max_tags = numpy.concatenate(max_tags)
max_tags = numpy.unique(max_tags)
stat_maxTags.append(len(max_tags))
info_maxTags = [data_array[data_array[:, 1] == t, :] for t in max_tags]
chimera_half_a = numpy.array([t[0:int(len(t) / 2)] for t in max_tags]) # mate1 part1
chimera_half_b = numpy.array([t[int(len(t) / 2):len(t)] for t in max_tags]) # mate1 part 2
new_format = []
for j in range(len(max_tags)):
fs_maxTags = ["{} {}".format(t[0], t[2]) for t in info_maxTags[j]]
if sample_half_a == chimera_half_a[j]:
max_tag = "*{}* {} {}".format(chimera_half_a[j], chimera_half_b[j], ", ".join(fs_maxTags))
new_format.append(max_tag)
elif sample_half_b == chimera_half_b[j]:
max_tag = "{} *{}* {}".format(chimera_half_a[j], chimera_half_b[j], ", ".join(fs_maxTags))
new_format.append(max_tag)
checked_tags.append(max_tags[j])
sample_tag = "{} {}\t{}".format(sample_half_a, sample_half_b, ", ".join(fs_tag1))
output_file1.write("{}\t{}\n".format(sample_tag, ", ".join(new_format)))
checked_tags.append(tag1)
output_file1.write(
"This file contains all tags that were identified as chimeras as the first column and the "
"corresponding tags which returned a Hamming distance of zero in either the first or the second "
"half of the sample tag as the second column.\n"
"The tags were separated by an empty space into their halves and the * marks the identical half.")
output_file1.write("\n\nStatistics of nr. of tags that returned max. TD (2nd column)\n")
output_file1.write("minimum\t{}\ttag(s)\n".format(numpy.amin(numpy.array(stat_maxTags))))
output_file1.write("mean\t{}\ttag(s)\n".format(numpy.mean(numpy.array(stat_maxTags))))
output_file1.write("median\t{}\ttag(s)\n".format(numpy.median(numpy.array(stat_maxTags))))
output_file1.write("maximum\t{}\ttag(s)\n".format(numpy.amax(numpy.array(stat_maxTags))))
output_file1.write("sum\t{}\ttag(s)\n".format(numpy.sum(numpy.array(stat_maxTags))))
lenTags = len(data_array)
len_sample = len(result1)
quant = numpy.array(data_array[result, 0]).astype(int) # family size for sample of tags
seq = numpy.array(data_array[result, 1]) # tags of sample
ham = numpy.asarray(ham) # HD for sample of tags
if onlyDuplicates is True: # ab and ba strands of DCSs
quant = numpy.concatenate((quant, duplTagsBA[result]))
seq = numpy.tile(seq, 2)
ham = numpy.tile(ham, 2)
diff = numpy.tile(diff, 2)
rel_Diff = numpy.tile(rel_Diff, 2)
diff_zeros = numpy.tile(diff_zeros, 2)
nr_chimeric_tags = len(data_chimeraAnalysis)
print("nr of chimeras", nr_chimeric_tags)
# prepare data for different kinds of plots
# distribution of FSs separated after HD
familySizeList1, hammingDistances, maximumXFS, minimumXFS = familySizeDistributionWithHD(quant, ham, rel=False)
list1, maximumX, minimumX = hammingDistanceWithFS(quant, ham) # histogram of HDs separated after FS
# get FS for all tags with min HD of analysis of chimeric reads
# there are more tags than sample size in the plot, because one tag can have multiple minimas
if onlyDuplicates:
seqDic = defaultdict(list)
for s, q in zip(seq, quant):
seqDic[s].append(q)
else:
seqDic = dict(zip(seq, quant))
lst_minHD_tags = []
for i in minHD_tags:
lst_minHD_tags.append(seqDic.get(i))
if onlyDuplicates:
lst_minHD_tags = numpy.concatenate(([item[0] for item in lst_minHD_tags],
[item_b[1] for item_b in lst_minHD_tags])).astype(int)
# histogram with absolute and relative difference between HDs of both parts of the tag
listDifference1, maximumXDifference, minimumXDifference = hammingDistanceWithFS(lst_minHD_tags, diff)
listRelDifference1, maximumXRelDifference, minimumXRelDifference = hammingDistanceWithFS(lst_minHD_tags, rel_Diff)
# chimeric read analysis: tags which have TD=0 in one of the halfs
if len(minHD_tags_zeros) != 0:
lst_minHD_tags_zeros = []
for i in minHD_tags_zeros:
lst_minHD_tags_zeros.append(seqDic.get(i)) # get family size for tags of chimeric reads
if onlyDuplicates:
lst_minHD_tags_zeros = numpy.concatenate(([item[0] for item in lst_minHD_tags_zeros],
[item_b[1] for item_b in lst_minHD_tags_zeros])).astype(int)
# histogram with HD of non-identical half
listDifference1_zeros, maximumXDifference_zeros, minimumXDifference_zeros = hammingDistanceWithFS(
lst_minHD_tags_zeros, diff_zeros)
if onlyDuplicates is False:
listDCS_zeros, maximumXDCS_zeros, minimumXDCS_zeros = hammingDistanceWithDCS(minHD_tags_zeros, diff_zeros, data_array)
# plot Hamming Distance with Family size distribution
plotHDwithFSD(list1=list1, maximumX=maximumX, minimumX=minimumX, pdf=pdf, rel_freq=rel_freq,
subtitle="Tag distance separated by family size", lenTags=lenTags,
xlabel="TD", nr_above_bars=nr_above_bars, len_sample=len_sample)
# Plot FSD with separation after
plotFSDwithHD2(familySizeList1, maximumXFS, minimumXFS, rel_freq=rel_freq,
originalCounts=quant, subtitle="Family size distribution separated by Tag distance",
pdf=pdf, relative=False, diff=False)
# Plot HD within tags
plotHDwithinSeq(HDhalf1, HDhalf1min, HDhalf2, HDhalf2min, minHDs, pdf=pdf, lenTags=lenTags,
rel_freq=rel_freq, len_sample=len_sample)
# Plot difference between HD's separated after FSD
plotHDwithFSD(listDifference1, maximumXDifference, minimumXDifference, pdf=pdf,
subtitle="Delta Tag distance within tags", lenTags=lenTags, rel_freq=rel_freq,
xlabel="absolute delta TD", relative=False, nr_above_bars=nr_above_bars, len_sample=len_sample)
plotHDwithFSD(listRelDifference1, maximumXRelDifference, minimumXRelDifference, pdf=pdf,
subtitle="Chimera Analysis: relative delta Tag distance", lenTags=lenTags, rel_freq=rel_freq,
xlabel="relative delta TD", relative=True, nr_above_bars=nr_above_bars,
nr_unique_chimeras=nr_chimeric_tags, len_sample=len_sample)
# plots for chimeric reads
if len(minHD_tags_zeros) != 0:
# HD
plotHDwithFSD(listDifference1_zeros, maximumXDifference_zeros, minimumXDifference_zeros, pdf=pdf,
subtitle="Tag distance of chimeric families (CF)", rel_freq=rel_freq,
lenTags=lenTags, xlabel="TD", relative=False,
nr_above_bars=nr_above_bars, nr_unique_chimeras=nr_chimeric_tags, len_sample=len_sample)
if onlyDuplicates is False:
plotHDwithDCS(listDCS_zeros, maximumXDCS_zeros, minimumXDCS_zeros, pdf=pdf,
subtitle="Tag distance of chimeric families (CF)", rel_freq=rel_freq,
lenTags=lenTags, xlabel="TD", relative=False,
nr_above_bars=nr_above_bars, nr_unique_chimeras=nr_chimeric_tags, len_sample=len_sample)
# print all data to a CSV file
# HD
summary, sumCol = createTableHD(list1, "TD=")
overallSum = sum(sumCol) # sum of columns in table
# FSD
summary5, sumCol5 = createTableFSD2(familySizeList1, diff=False)
overallSum5 = sum(sumCol5)
# HD of both parts of the tag
summary9, sumCol9 = createTableHDwithTags([HDhalf1, HDhalf1min, HDhalf2, HDhalf2min, numpy.array(minHDs)])
overallSum9 = sum(sumCol9)
# HD
# absolute difference
summary11, sumCol11 = createTableHD(listDifference1, "diff=")
overallSum11 = sum(sumCol11)
# relative difference and all tags
summary13, sumCol13 = createTableHD(listRelDifference1, "diff=")
overallSum13 = sum(sumCol13)
# chimeric reads
if len(minHD_tags_zeros) != 0:
# absolute difference and tags where at least one half has HD=0
summary15, sumCol15 = createTableHD(listDifference1_zeros, "TD=")
overallSum15 = sum(sumCol15)
if onlyDuplicates is False:
summary16, sumCol16 = createTableHDwithDCS(listDCS_zeros)
overallSum16 = sum(sumCol16)
output_file.write("{}\n".format(name1))
output_file.write("nr of tags{}{:,}\nsample size{}{:,}\n\n".format(sep, lenTags, sep, len_sample))
# HD
createFileHD(summary, sumCol, overallSum, output_file,
"Tag distance separated by family size", sep)
# FSD
createFileFSD2(summary5, sumCol5, overallSum5, output_file,
"Family size distribution separated by Tag distance", sep,
diff=False)
# output_file.write("{}{}\n".format(sep, name1))
output_file.write("\n")
max_fs = numpy.bincount(integers[result])
output_file.write("max. family size in sample:{}{}\n".format(sep, max(integers[result])))
output_file.write("absolute frequency:{}{}\n".format(sep, max_fs[len(max_fs) - 1]))
output_file.write(
"relative frequency:{}{}\n\n".format(sep, float(max_fs[len(max_fs) - 1]) / sum(max_fs)))
# HD within tags
output_file.write(
"Chimera Analysis:\nThe tags are splitted into two halves (part a and b) for which the Tag distances (TD) are calculated seperately.\n"
"The tag distance of the first half (part a) is calculated by comparing part a of the tag in the sample against all a parts in the dataset and by selecting the minimum value (TD a.min).\n"
"In the next step, we select those tags that showed the minimum TD and estimate the TD for the second half (part b) of the tag by comparing part b against the previously selected subset.\n"
"The maximum value represents then TD b.max. Finally, these process is repeated but starting with part b instead and TD b.min and TD a.max are calculated.\n"
"Next, the absolute differences between TD a.min & TD b.max and TD b.min & TD a.max are estimated (delta HD).\n"
"These are then divided by the sum of both parts (TD a.min + TD b.max or TD b.min + TD a.max, respectively) which give the relative differences between the partial HDs (rel. delta HD).\n"
"For simplicity, we used the maximum value of the relative differences and the respective delta HD.\n"
"Note that when only tags that can form a DCS are included in the analysis, the family sizes for both directions (ab and ba) of the strand will be included in the plots.\n")
output_file.write("\nlength of one half of the tag{}{}\n\n".format(sep, int(len(data_array[0, 1]) / 2)))
createFileHDwithinTag(summary9, sumCol9, overallSum9, output_file,
"Tag distance of each half in the tag", sep)
createFileHD(summary11, sumCol11, overallSum11, output_file,
"Absolute delta Tag distance within the tag", sep)
createFileHD(summary13, sumCol13, overallSum13, output_file,
"Chimera analysis: relative delta Tag distance", sep)
if len(minHD_tags_zeros) != 0:
output_file.write(
"All tags are filtered and only those tags where one half is identical (TD=0) and therefore, have a relative delta TD of 1, are kept.\n"
"These tags are considered as chimeras.\n")
createFileHD(summary15, sumCol15, overallSum15, output_file,
"Tag distance of chimeric families separated after FS", sep)
if onlyDuplicates is False:
createFileHDwithDCS(summary16, sumCol16, overallSum16, output_file,
"Tag distance of chimeric families separated after DCS and single SSCS (ab, ba)", sep)
output_file.write("\n")
if __name__ == '__main__':
sys.exit(Hamming_Distance_Analysis(sys.argv))
| mit |
elenita1221/BDA_py_demos | demos_ch5/demo5_1.py | 19 | 5055 | """Bayesian Data Analysis, 3rd ed
Chapter 5, demo 1
Hierarchical model for Rats experiment (BDA3, p. 102).
"""
from __future__ import division
import numpy as np
from scipy.stats import beta
from scipy.special import gammaln
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=(plt.rcParams['lines.color'],)) # Disable color cycle
# rat data (BDA3, p. 102)
y = np.array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 5, 2,
5, 3, 2, 7, 7, 3, 3, 2, 9, 10, 4, 4, 4, 4, 4, 4, 4,
10, 4, 4, 4, 5, 11, 12, 5, 5, 6, 5, 6, 6, 6, 6, 16, 15,
15, 9, 4
])
n = np.array([
20, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 18, 18, 17, 20, 20, 20,
20, 19, 19, 18, 18, 25, 24, 23, 20, 20, 20, 20, 20, 20, 10, 49, 19,
46, 27, 17, 49, 47, 20, 20, 13, 48, 50, 20, 20, 20, 20, 20, 20, 20,
48, 19, 19, 19, 22, 46, 49, 20, 20, 23, 19, 22, 20, 20, 20, 52, 46,
47, 24, 14
])
M = len(y)
# plot the separate and pooled models
plt.figure(figsize=(8,10))
x = np.linspace(0, 1, 250)
# separate
plt.subplot(2, 1, 1)
lines = plt.plot(x, beta.pdf(x[:,None], y[:-1] + 1, n[:-1] - y[:-1] + 1),
linewidth=1)
# highlight the last line
line1, = plt.plot(x, beta.pdf(x, y[-1] + 1, n[-1] - y[-1] + 1), 'r')
plt.legend((lines[0], line1),
(r'Posterior of $\theta_j$', r'Posterior of $\theta_{71}$'))
plt.yticks(())
plt.title('separate model')
# pooled
plt.subplot(2, 1, 2)
plt.plot(x, beta.pdf(x, y.sum() + 1, n.sum() - y.sum() + 1),
linewidth=2, label=(r'Posterior of common $\theta$'))
plt.legend()
plt.yticks(())
plt.xlabel(r'$\theta$', fontsize=20)
plt.title('pooled model')
# compute the marginal posterior of alpha and beta in the hierarchical model in a grid
A = np.linspace(0.5, 6, 100)
B = np.linspace(3, 33, 100)
# calculated in logarithms for numerical accuracy
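# The expression below is the unnormalized log marginal posterior
# log p(alpha, beta | y): the log of the hyperprior
# p(alpha, beta) propto (alpha + beta)^(-5/2) plus the sum over experiments
# of the log beta-function ratios (cf. BDA3, eq. 5.8).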
lp = (
- 5/2 * np.log(A + B[:,None])
+ np.sum(
gammaln(A + B[:,None])
- gammaln(A)
- gammaln(B[:,None])
+ gammaln(A + y[:,None,None])
+ gammaln(B[:,None] + (n - y)[:,None,None])
- gammaln(A + B[:,None] + n[:,None,None]),
axis=0
)
)
# subtract the maximum value to avoid over/underflow in exponentation
lp -= lp.max()
p = np.exp(lp)
# plot the marginal posterior
fig = plt.figure()
plt.imshow(p, origin='lower', aspect='auto', extent=(A[0], A[-1], B[0], B[-1]))
plt.xlabel(r'$\alpha$', fontsize=20)
plt.ylabel(r'$\beta$', fontsize=20)
plt.title('The marginal posterior of alpha and beta in hierarchical model')
# sample from the posterior grid of alpha and beta
nsamp = 1000
samp_indices = np.unravel_index(
np.random.choice(p.size, size=nsamp, p=p.ravel()/p.sum()),
p.shape
)
samp_A = A[samp_indices[1]]
samp_B = B[samp_indices[0]]
# add random jitter, see BDA3 p. 76
samp_A += (np.random.rand(nsamp) - 0.5) * (A[1]-A[0])
samp_B += (np.random.rand(nsamp) - 0.5) * (B[1]-B[0])
# Plot samples from the distribution of distributions Beta(alpha,beta),
# that is, plot Beta(alpha,beta) using the posterior samples of alpha and beta
fig = plt.figure(figsize=(8,10))
plt.subplot(2, 1, 1)
plt.plot(x, beta.pdf(x[:,None], samp_A[:20], samp_B[:20]), linewidth=1)
plt.yticks(())
plt.title(r'Posterior samples from the distribution of distributions '
r'Beta($\alpha$,$\beta$)')
# The average of above distributions, is the predictive distribution for a new
# theta, and also the prior distribution for theta_j.
# Plot this.
plt.subplot(2, 1, 2)
plt.plot(x, np.mean(beta.pdf(x, samp_A[:,None], samp_B[:,None]), axis=0))
plt.yticks(())
plt.xlabel(r'$\theta$', fontsize=20)
plt.title(r'Predictive distribution for a new $\theta$ '
r'and prior for $\theta_j$')
# And finally compare the separate model and hierarchical model
plt.figure(figsize=(8,10))
x = np.linspace(0, 1, 250)
# first plot the separate model (same as above)
plt.subplot(2, 1, 1)
# note that for clarity only every 7th distribution is plotted
plt.plot(x, beta.pdf(x[:,None], y[7:-1:7] + 1, n[7:-1:7] - y[7:-1:7] + 1),
linewidth=1)
# highlight the last line
plt.plot(x, beta.pdf(x, y[-1] + 1, n[-1] - y[-1] + 1), 'r')
plt.yticks(())
plt.title('separate model')
# And the hierarchical model. Note that these marginal posteriors for theta_j are
# more narrow than in separate model case, due to borrowed information from
# the other theta_j's.
plt.subplot(2, 1, 2)
# note that for clarity only every 7th distribution is plotted
lines = plt.plot(
x,
np.mean(
beta.pdf(
x[:,None],
y[7::7] + samp_A[:,None,None],
n[7::7] - y[7::7] + samp_B[:,None,None]
),
axis=0
),
linewidth=1,
)
# highlight the last line
lines[-1].set_linewidth(2)
lines[-1].set_color('r')
plt.yticks(())
plt.xlabel(r'$\theta$', fontsize=20)
plt.title('hierarchical model')
plt.show()
| gpl-3.0 |
brohrer/becca | becca/tools.py | 1 | 9057 | """
Constants and functions for use across the Becca core.
"""
from __future__ import print_function
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import logging
logging.basicConfig(filename='log/log.log', level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(os.path.basename(__file__))
# Shared constants
epsilon = sys.float_info.epsilon
big = 10 ** 20
max_int16 = np.iinfo(np.int16).max
def pad(arr, shape, val=0, dtype=float):
"""
Pad a numpy array to the specified shape.
Use val (default 0) to fill in the extra spaces.
Parameters
----------
arr : array of ints or floats
The array to pad.
shape : int, list of ints or tuple of ints
The shape to which to pad ``a``.
If any element of shape is 0, that size remains unchanged in
that axis. If any element of shape is < 0, the size of ``a`` in that
axis is incremented by the magnitude of that value.
val : float
The value with which to pad ``arr``. Default is 0.
dtype : dtype
The data type with which to pad ``arr``.
Returns
-------
padded : array of ints or floats
The padded version of ``arr``.
"""
# For padding a 1D array
if isinstance(shape, int):
if shape <= 0:
rows = arr.size - shape
else:
rows = shape
if rows < arr.size:
logger.warn(' '.join(['arr.size is', str(arr.size),
' but trying to pad to ',
str(rows), 'rows.']))
return arr
# Handle the case where a is a one-dimensional array
padded = np.ones(rows, dtype=dtype) * val
padded[:arr.size] = arr
return padded
# For padding arr n-D array
new_shape = shape
n_dim = len(shape)
if n_dim > 4:
logger.info(''.join([str(n_dim), ' dimensions? Now you\'re getting greedy']))
return arr
for dim, _ in enumerate(shape):
if shape[dim] <= 0:
new_shape[dim] = arr.shape[dim] - shape[dim]
else:
if new_shape[dim] < arr.shape[dim]:
logger.warn(''.join(['The variable shape in dimension ',
str(dim), ' is ', str(arr.shape[dim]),
' but you are trying to pad to ',
str(new_shape[dim]), '.']))
logger.warn('You aren\'t allowed to make it smaller.')
return arr
padded = np.ones(new_shape, dtype=dtype) * val
if len(new_shape) == 2:
padded[:arr.shape[0], :arr.shape[1]] = arr
return padded
if len(new_shape) == 3:
padded[:arr.shape[0], :arr.shape[1], :arr.shape[2]] = arr
return padded
# A maximum of 4 dimensions is enforced.
padded[:arr.shape[0], :arr.shape[1], :arr.shape[2], :arr.shape[3]] = arr
return padded
def str_to_int(exp):
"""
Convert a string to an integer.
The method is primitive, using a simple hash based on the
ordinal value of the characters and their position in the string.
Parameters
----------
exp : str
The string expression to convert to an int.
Returns
-------
sum : int
An integer that is likely (though not extremely so) to be unique
within the scope of the program.
"""
sum_ = 0
for i, character in enumerate(exp):
sum_ += i + ord(character) + i * ord(character)
return sum_
def timestr(timestep, s_per_step=.25, precise=True):
"""
Convert the number of time steps into an age.
Parameters
----------
timestep : int
The age in time steps.
s_per_step : float
The duration of each time step in seconds.
precise : bool
If True, report the age down to the second.
If False, just report the most significant unit of time.
Default is True
Returns
-------
time_str : str
The age in string format, including, as appropriate, years,
months, days, hours, minutes, and seconds.
"""
# Start by calculating the total number of seconds.
total_sec = timestep * s_per_step
sec = int(np.mod(total_sec, 60))
time_str = ' '.join([str(sec), 'sec'])
# If necessary, calculate the total number of minutes.
total_min = int(total_sec / 60)
if total_min == 0:
return time_str
min_ = int(np.mod(total_min, 60))
if precise:
time_str = ' '.join([str(min_), 'min', time_str])
else:
time_str = ' '.join([str(min_), 'min'])
# If necessary, calculate the total number of hours.
total_hr = int(total_min / 60)
if total_hr == 0:
return time_str
hr_ = int(np.mod(total_hr, 24))
if precise:
time_str = ' '.join([str(hr_), 'hr', time_str])
else:
time_str = ' '.join([str(hr_), 'hr'])
# If necessary, calculate the total number of days.
total_day = int(total_hr / 24)
if total_day == 0:
return time_str
day = int(np.mod(total_day, 30))
if precise:
time_str = ' '.join([str(day), 'dy', time_str])
else:
time_str = ' '.join([str(day), 'dy'])
# If necessary, calculate the total number of months.
total_mon = int(total_day / 30)
if total_mon == 0:
return time_str
mon = int(np.mod(total_mon, 12))
if precise:
time_str = ' '.join([str(mon), 'mo', time_str])
else:
time_str = ' '.join([str(mon), 'mo'])
# If necessary, calculate the total number of years.
yr_ = int(total_mon / 12)
if yr_ == 0:
return time_str
if precise:
time_str = ' '.join([str(yr_), 'yr', time_str])
else:
time_str = ' '.join([str(yr_), 'yr'])
return time_str
def fatigue(raw_activities, energies, fatigue_rate=3e-4, recharge_rate=1e-4):
"""
Limit the frequency and intensity of activities with a model of fatigue.
@param raw_activities: array of floats
The activities before fatigue has been applied.
@param energies: array of floats
The accumulated energy a channel has at its disposal.
@param fatigue_rate
The rate at which energy is depleted when a channel is active.
@param recharge_rate:
The rate at which energy is re-accumulated.
@return activities: array of floats
The activities after fatigue has been applied.
"""
energies -= fatigue_rate * raw_activities * energies
energies += recharge_rate * (1 - raw_activities) * (1 - energies)
activities = raw_activities * energies
return activities
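# A minimal sketch of fatigue() applied over many time steps; the activity
# levels below are assumed examples. Persistently active channels deplete
# their energy and their effective activity drops, while idle channels
# recharge toward full energy.
def _demo_fatigue(n_steps=1000):
    raw_activities = np.array([0., .25, .5, 1.])
    energies = np.ones(raw_activities.size)
    activities = raw_activities * energies
    for _ in range(n_steps):
        activities = fatigue(raw_activities, energies)
    return activities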
def format_decimals(array):
"""
Format and print an array as a list of fixed decimal numbers in a string.
Parameters
----------
array : array of floats
The array to be formatted.
"""
if len(array.shape) == 2:
for j in range(array.shape[1]):
formatted = (' '.join(['{0},{1}:{2:.3}'.format(i, j, array[i, j])
for i in range(array.shape[0])]))
logger.info(formatted)
else:
array = array.copy().ravel()
formatted = (' '.join(['{0}:{1:.3}'.format(i, array[i])
for i in range(array.size)]))
logger.info(formatted)
def get_files_with_suffix(dir_name, suffixes):
"""
Get all of the files with a given suffix in dir recursively.
Parameters
----------
dir_name : str
The path to the directory to search.
suffixes : list of str
The set of suffixes for which files are being collected.
Returns
-------
found_filenames : list of str
The filenames, including the local path from ``dir_name``.
"""
found_filenames = []
for localpath, _, filenames in os.walk(dir_name):
for filename in filenames:
for suffix in suffixes:
if filename.endswith(suffix):
found_filenames.append(os.path.join(localpath, filename))
found_filenames.sort()
return found_filenames
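# A minimal usage sketch; the directory name and suffixes are assumed
# examples. Suffixes are matched with str.endswith, so include the dot.
def _demo_get_files_with_suffix(dir_name='logs'):
    return get_files_with_suffix(dir_name, ['.txt', '.csv'])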
def visualize_array(image_data, label='data_figure'):
"""
Produce a visual representation of the image_data matrix.
Parameters
----------
image_data : 2D array of floats
The pixel values to make into an image.
label : str
The string label to affix to the image. It is used both
to generate a figure number and as the title.
"""
# Treat nan values like zeros for display purposes
image_data = np.nan_to_num(np.copy(image_data))
fig = plt.figure(str_to_int(label))
# Diane made the brilliant suggestion to leave this plot in color.
# It looks much prettier.
plt.bone()
img = plt.imshow(image_data)
img.set_interpolation('nearest')
plt.title(label)
plt.xlabel('Max = {0:.3}, Min = {1:.3}'.format(np.max(image_data),
np.min(image_data)))
fig.show()
fig.canvas.draw()
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/tests/test_dviread.py | 8 | 1667 | from nose.tools import assert_equal
import matplotlib.dviread as dr
import os.path
original_find_tex_file = dr.find_tex_file
def setup():
dr.find_tex_file = lambda x: x
def teardown():
dr.find_tex_file = original_find_tex_file
def test_PsfontsMap():
filename = os.path.join(
os.path.dirname(__file__),
'baseline_images', 'dviread', 'test.map')
fontmap = dr.PsfontsMap(filename)
# Check all properties of a few fonts
for n in [1, 2, 3, 4, 5]:
key = 'TeXfont%d' % n
entry = fontmap[key]
assert_equal(entry.texname, key)
assert_equal(entry.psname, 'PSfont%d' % n)
if n not in [3, 5]:
assert_equal(entry.encoding, 'font%d.enc' % n)
elif n == 3:
assert_equal(entry.encoding, 'enc3.foo')
# We don't care about the encoding of TeXfont5, which specifies
# multiple encodings.
if n not in [1, 5]:
assert_equal(entry.filename, 'font%d.pfa' % n)
else:
assert_equal(entry.filename, 'font%d.pfb' % n)
if n == 4:
assert_equal(entry.effects, {'slant': -0.1, 'extend': 2.2})
else:
assert_equal(entry.effects, {})
# Some special cases
entry = fontmap['TeXfont6']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont7']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, 'font7.enc')
entry = fontmap['TeXfont8']
assert_equal(entry.filename, 'font8.pfb')
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont9']
assert_equal(entry.filename, '/absolute/font9.pfb')
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_NonLinHardShear/Area/A_1e2/Normal_Stress_Plot.py | 72 | 2800 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
bptripp/it-cnn | tuning/orientation.py | 1 | 10458 | __author__ = 'bptripp'
import csv
import time
import numpy as np
import matplotlib.pyplot as plt
from cnn_stimuli import get_image_file_list
from alexnet import preprocess, load_net, load_vgg
from scipy.signal import fftconvolve
def mean_corr(out):
cc = np.corrcoef(out.T)
n = cc.shape[0]
print('n: ' + str(n))
print(out.shape)
cc_list = []
for i in range(n):
for j in range(i+1,n):
cc_list.append(cc[i,j])
print(cc_list)
mean_cc = np.mean(cc_list)
print('mean corr: ' + str(mean_cc))
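# smooth() below tiles each selected tuning curve three times and convolves
# it with an n-point boxcar filter, so the smoothing wraps around circularly
# (orientation is periodic); only the middle copy is returned.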
def smooth(out, ind, n=5):
l = out.shape[0]
wrapped = np.zeros((out.shape[0]*3, len(ind)))
wrapped[0:l,:] = out[:,ind]
wrapped[l:2*l,:] = out[:,ind]
wrapped[2*l:3*l,:] = out[:,ind]
filter = 1./n*np.ones(n)
# print('filter: ' + str(filter))
for i in range(wrapped.shape[1]):
wrapped[:,i] = fftconvolve(wrapped[:,i], filter, 'same')
return wrapped[l:2*l,:]
def plot_curves(out, n, kernel_len=5, angles=None, normalize=False):
print('plotting')
if angles is None:
angles = np.linspace(0, 360, 91)
maxima = np.max(out, axis=0)
# n = 10
# ind = (-maxima).argsort()[:n]
# print('using first n')
# ind = range(n)
print('using first large n')
ind = []
i = 0
while len(ind) < n:
if maxima[i] > 2:
ind.append(i)
i = i + 1
print(ind)
smoothed = smooth(out, ind, n=kernel_len)
# if smooth:
# smoothed = smooth(out, ind)
# else:
# smoothed = out[:,ind]
mean_corr(smoothed)
# print(len(angles))
# print(smoothed.shape)
if normalize:
smoothed = smoothed / np.max(smoothed, axis=0)
plt.plot(angles, smoothed)
plt.xlim([np.min(angles), np.max(angles)])
def freiwald_depths(out):
"""
From Freiwald & Tsao (2010) Science, supplementary material, page 6 ...
Head orientation tuning depth was computed using the mean
response to frontal faces (Rfrontal), and the mean response to full profile faces in the preferred
direction (Rprofile) as follows:
Tuning Depth = (Rfrontal - Rprofile) / (Rfrontal + Rprofile)
"""
assert(out.shape[0] == 25)
frontal = np.maximum(0, out[0,:])
profile = np.maximum(0, np.maximum(out[6,:], out[19,:]))
m = np.max(out, axis=0)
frontal = frontal[m >= 1]
profile = profile[m >= 1]
return (frontal - profile) / (frontal + profile + 1e-3)
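# A minimal sketch of the tuning depth defined above for a single unit with
# assumed responses: twice as strong to frontal views as to profile views.
def _demo_tuning_depth(r_frontal=2.0, r_profile=1.0):
    return (r_frontal - r_profile) / (r_frontal + r_profile + 1e-3)  # ~0.33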
def plot_freiwald_histograms():
fractions = []
fractions.append(np.loadtxt(open("../data/freiwald-4h-am.csv","rb"),delimiter=","))
fractions.append(np.loadtxt(open("../data/freiwald-4h-al.csv","rb"),delimiter=","))
fractions.append(np.loadtxt(open("../data/freiwald-4h-mlmf.csv","rb"),delimiter=","))
plt.figure(figsize=(3,2*3))
for i in range(3):
plt.subplot(3,1,i+1)
f = fractions[i]
hist_edges = np.linspace(-1, 1, len(f)+1)
# hist_edges = hist_edges[:-1]
print(f.shape)
print(hist_edges.shape)
plt.bar(hist_edges[:-1]+.02, f, width=hist_edges[1]-hist_edges[0]-.04, color=[.5,.5,.5])
plt.xlim([-1, 1])
plt.ylim([0, .55])
plt.ylabel('Fraction of cells')
plt.xlabel('Head orientation tuning depth')
plt.tight_layout()
plt.savefig('../figures/orientation-freiwald.eps')
plt.show()
def plot_logothetis_and_freiwald_tuning_data():
plt.figure(figsize=(9,2.5))
plt.subplot(1,3,1)
plot_csv_tuning_curve('../data/logothetis-a.csv')
plot_csv_tuning_curve('../data/logothetis-b.csv')
plot_csv_tuning_curve('../data/logothetis-c.csv')
plot_csv_tuning_curve('../data/logothetis-d.csv')
plot_csv_tuning_curve('../data/logothetis-e.csv')
plt.ylabel('Response')
plt.xlabel('Angle (degrees)')
plt.xlim([-180, 180])
plt.xticks([-180, -60, 60, 180])
plt.subplot(1,3,3)
plot_csv_tuning_curve('../data/freiwald-1.csv')
plot_csv_tuning_curve('../data/freiwald-2.csv')
plot_csv_tuning_curve('../data/freiwald-3.csv')
plot_csv_tuning_curve('../data/freiwald-4.csv')
plt.xlabel('Angle (degrees)')
plt.xlim([-180, 180])
plt.xticks([-180, -60, 60, 180])
plt.tight_layout()
plt.savefig('../figures/tuning-data.eps')
plt.show()
def plot_csv_tuning_curve(filename):
data = np.loadtxt(open(filename, 'rb'), delimiter=',')
ind = np.argsort(data[:,0])
plt.plot(data[ind,0], data[ind,1])
if __name__ == '__main__':
# model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=2)
# use_vgg = True
# model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=1)
# use_vgg = False
#
# plt.figure(figsize=(6,6))
# # image_files = get_image_file_list('./images/swiss-knife-rotations/', 'png', with_path=True)
# image_files = get_image_file_list('./images/staple-rotations/', 'png', with_path=True)
# im = preprocess(image_files, use_vgg=use_vgg)
# out = model.predict(im)
# # plt.subplot(2,2,1)
# plt.subplot2grid((5,2), (0,0), rowspan=2)
# plot_curves(out, 10)
# # plt.ylim([0,12])
# plt.ylabel('Response')
# image_files = get_image_file_list('./images/shoe-rotations/', 'png', with_path=True)
# im = preprocess(image_files, use_vgg=use_vgg)
# out = model.predict(im)
# # plt.subplot(2,2,2)
# plt.subplot2grid((5,2), (0,1), rowspan=2)
# plot_curves(out, 10)
# # plt.ylim([0,12])
# image_files = get_image_file_list('./images/corolla-rotations/', 'png', with_path=True)
# im = preprocess(image_files, use_vgg=use_vgg)
# out = model.predict(im)
# # plt.subplot(2,2,3)
# plt.subplot2grid((5,2), (3,0), rowspan=2)
# plot_curves(out, 10)
# # plt.ylim([0,12])
# plt.xlabel('Angle (degrees)')
# plt.ylabel('Response')
# image_files = get_image_file_list('./images/banana-rotations/', 'png', with_path=True)
# im = preprocess(image_files, use_vgg=use_vgg)
# out = model.predict(im)
# # plt.subplot(2,2,4)
# plt.subplot2grid((5,2), (3,1), rowspan=2)
# plot_curves(out, 10)
# # plt.ylim([0,12])
# plt.xlabel('Angle (degrees)')
# plt.tight_layout()
# plt.savefig('../figures/orientation.eps')
# plt.show()
plot_freiwald_histograms()
# # plot_logothetis_and_freiwald_tuning_data()
# remove_levels = [0,1,2]
# use_vgg = False
#
# # remove_levels = [0,1,2]
# # use_vgg = False
#
# plt.figure(figsize=(9,2*len(remove_levels)))
#
# hist_edges = np.linspace(-1, 1, 10)
# freiwalds = []
#
# for i in range(len(remove_levels)):
# if use_vgg:
# model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=remove_levels[i])
# else:
# model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=remove_levels[i])
#
# plt.subplot(len(remove_levels),3,(3*i)+1)
# image_files = get_image_file_list('./source-images/staple/', 'jpg', with_path=True)
# im = preprocess(image_files, use_vgg=use_vgg)
# out = model.predict(im)
# # angles = np.array(range(0,361,10))
# wrap_indices = range(18, 36) + range(19)
# plot_curves(out[wrap_indices,:], 10, kernel_len=3, angles=range(-180,181,10))
# plt.xticks([-180, -60, 60, 180])
# plt.ylabel('Response')
# # plt.title('Staple')
# if i == len(remove_levels)-1:
# plt.xlabel('Angle (degrees)')
#
#
# plt.subplot(len(remove_levels),3,(3*i)+2)
# image_files = get_image_file_list('./source-images/scooter/', 'jpg', with_path=True)
# im = preprocess(image_files, use_vgg=use_vgg)
# out = model.predict(im)
# wrap_indices = range(12,24) + range(13)
# plot_curves(out[wrap_indices,:], 10, kernel_len=3, angles=range(-180,181,15))
# plt.xticks([-180, -60, 60, 180])
# # plt.title('Scooter')
# if i == len(remove_levels)-1:
# plt.xlabel('Angle (degrees)')
#
# plt.subplot(len(remove_levels),3,(3*i)+3)
# image_files = get_image_file_list('./source-images/head/', 'jpg', with_path=True)
# im = preprocess(image_files, use_vgg=use_vgg)
# out = model.predict(im)
# wrap_indices = range(12,24) + range(13)
# plot_curves(out[wrap_indices,:], 10, kernel_len=3, angles=range(-180,181,15))
# plt.xticks([-180, -60, 60, 180])
# # plt.title('Head')
# if i == len(remove_levels)-1:
# plt.xlabel('Angle (degrees)')
#
# fd = freiwald_depths(out)
# h, e = np.histogram(fd, hist_edges)
# h = h.astype(float)
# h = h / np.sum(h)
# freiwalds.append(h)
#
# plt.tight_layout()
# plt.savefig('../figures/orientation3d.eps')
# plt.show()
#
# plt.figure(figsize=(3,2*len(remove_levels)))
# for i in range(len(remove_levels)):
# plt.subplot(len(remove_levels),1,i+1)
# plt.bar(hist_edges[:-1]+.02, freiwalds[i], width=.2-.04, color=[.5,.5,.5])
# plt.xlim([-1, 1])
# plt.ylim([0, .55])
# plt.ylabel('Fraction of units')
#
# plt.xlabel('Head orientation tuning depth')
# plt.tight_layout()
# plt.savefig('../figures/orientation-freiwald-net.eps')
# plt.show()
# plt.figure(figsize=(7,3))
# n = 15
# kernel_len = 3
# angles = range(0,361,10)
# image_files = get_image_file_list('./source-images/staple/', 'jpg', with_path=True)
# im = preprocess(image_files)
# out = model.predict(im)
# plt.subplot(1,2,1)
# plot_curves(out, n, kernel_len=kernel_len, angles=angles, normalize=True)
# plt.ylabel('Response')
# plt.ylim([0,1])
# plt.title('CNN')
#
# fake = np.zeros((out.shape[0], n))
# np.random.seed(1)
# prefs = 360.*np.random.rand(n)
# i = 0
# widths = np.zeros(n)
# while i < n:
# width = 30. + 15. * np.random.randn()
# if width > 1:
# widths[i] = width
# i = i + 1
# print(widths)
#
# for i in range(n):
# fake[:,i] = np.exp(-(angles-prefs[i])**2 / 2 / widths[i]**2)
#
#
# plt.subplot(1,2,2)
# plot_curves(fake, n, kernel_len=kernel_len, angles=angles, normalize=True)
# plt.title('Empirical')
# plt.ylim([0,1])
#
# plt.tight_layout()
# plt.savefig('../figures/orientation-salman.eps')
# plt.show()
| mit |
RachitKansal/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too highly correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. Moreover, with a small
number of observations, it is easier to recover a correlation matrix
than a covariance matrix, so we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small number
of samples forces us to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, which
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model,
is chosen by internal cross-validation in the GraphLassoCV. As can be
seen in figure 2, the grid used to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
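# The sparsity of the estimate is governed by the alpha parameter that
# GraphLassoCV selected by internal cross-validation; it can be inspected
# directly.
print("alpha chosen by cross-validation: %.4f" % model.alpha_)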
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
andaag/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
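# As a quick illustration of the character analyzer defined above, build it
# directly and inspect how a short word is split into overlapping 1- to
# 3-character n-grams (u'cat' yields [u'c', u'a', u't', u'ca', u'at', u'cat']).
analyze = vectorizer.build_analyzer()
print(analyze(u'cat'))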
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
largelymfs/w2vtools | build/scipy/scipy/signal/ltisys.py | 5 | 30979 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
#
from .filter_design import tf2zpk, zpk2tf, normalize, freqs
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
import scipy.interpolate as interpolate
import scipy.integrate as integrate
import scipy.linalg as linalg
from scipy.lib.six import xrange
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
__all__ = ['tf2ss', 'ss2tf', 'abcd_normalize', 'zpk2ss', 'ss2zpk', 'lti',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp']
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator polynomials.
The denominator needs to be at least as long as the numerator.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
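
Examples
--------
As a minimal example, convert the transfer function
``H(s) = (s**2 + 3*s + 3) / (s**2 + 2*s + 1)``
to controller-canonical state-space form:

>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> A, B, C, D = signal.tf2ss(num, den)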
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if M > K:
msg = "Improper transfer function. `num` is longer than `den`."
raise ValueError(msg)
if M == 0 or K == 0: # Null system
return array([], float), array([], float), array([], float), \
array([], float)
# pad numerator to have same number of columns has denominator
num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:, 0]
else:
D = array([], float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K - 2, K - 1)]
B = eye(K - 1, 1)
C = num[:, 1:] - num[:, 0] * den[1:]
return A, B, C, D
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
If enough information on the system is provided, that is, enough
properly-shaped arrays are passed to the function, the missing ones
are built from this information, ensuring the correct number of
rows and columns. Otherwise a ValueError is raised.
Parameters
----------
A, B, C, D : array_like, optional
State-space matrices. All of them are None (missing) by default.
Returns
-------
A, B, C, D : array
Properly shaped state-space matrices.
Raises
------
ValueError
If not enough information on the system was provided.
"""
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
MA, NA = _shape_or_none(A)
MB, NB = _shape_or_none(B)
MC, NC = _shape_or_none(C)
MD, ND = _shape_or_none(D)
p = _choice_not_none(MA, MB, NC)
q = _choice_not_none(NB, ND)
r = _choice_not_none(MC, MD)
if p is None or q is None or r is None:
raise ValueError("Not enough information on the system.")
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
A = _restore(A, (p, p))
B = _restore(B, (p, q))
C = _restore(C, (r, p))
D = _restore(D, (r, q))
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num : 2-D ndarray
Numerator(s) of the resulting transfer function(s). `num` has one row
for each of the system's outputs. Each row is a sequence representation
of the numerator polynomial.
den : 1-D ndarray
Denominator of the resulting transfer function(s). `den` is a sequence
representation of the denominator polynomial.
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:, input]
B.shape = (B.shape[0], 1)
if D.shape[-1] != 0:
D = D[:, input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:, 0] + B[:, 0] + C[0, :] + D
num = numpy.zeros((nout, num_states + 1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k, :])
num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
"""
return tf2ss(*zpk2tf(z, p, k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A, B, C, D, input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
Parameters
----------
args : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of elements in the tuple and the
interpretation:
* 2: (numerator, denominator)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
Each argument can be an array or sequence.
Notes
-----
`lti` instances have all types of representations available; for example
after creating an instance s with ``(zeros, poles, gain)`` the transfer
function representation (numerator, denominator) can be accessed as
``s.num`` and ``s.den``.
"""
def __init__(self, *args, **kwords):
"""
Initialize the LTI system using either:
- (numerator, denominator)
- (zeros, poles, gain)
- (A, B, C, D) : state-space.
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self._num, self._den = normalize(*args)
self._update(N)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self._zeros, self._poles, self._gain = args
self._update(N)
# make sure we have numpy arrays
self.zeros = numpy.asarray(self.zeros)
self.poles = numpy.asarray(self.poles)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self._A, self._B, self._C, self._D = abcd_normalize(*args)
self._update(N)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __repr__(self):
"""
Canonical representation using state-space to preserve numerical
precision and any MIMO information
"""
return '{0}(\n{1},\n{2},\n{3},\n{4}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
)
@property
def num(self):
return self._num
@num.setter
def num(self, value):
self._num = value
self._update(2)
@property
def den(self):
return self._den
@den.setter
def den(self, value):
self._den = value
self._update(2)
@property
def zeros(self):
return self._zeros
@zeros.setter
def zeros(self, value):
self._zeros = value
self._update(3)
@property
def poles(self):
return self._poles
@poles.setter
def poles(self, value):
self._poles = value
self._update(3)
@property
def gain(self):
return self._gain
@gain.setter
def gain(self, value):
self._gain = value
self._update(3)
@property
def A(self):
return self._A
@A.setter
def A(self, value):
self._A = value
self._update(4)
@property
def B(self):
return self._B
@B.setter
def B(self, value):
self._B = value
self._update(4)
@property
def C(self):
return self._C
@C.setter
def C(self, value):
self._C = value
self._update(4)
@property
def D(self):
return self._D
@D.setter
def D(self, value):
self._D = value
self._update(4)
def _update(self, N):
if N == 2:
self._zeros, self._poles, self._gain = tf2zpk(self.num, self.den)
self._A, self._B, self._C, self._D = tf2ss(self.num, self.den)
if N == 3:
self._num, self._den = zpk2tf(self.zeros, self.poles, self.gain)
self._A, self._B, self._C, self._D = zpk2ss(self.zeros,
self.poles, self.gain)
if N == 4:
self._num, self._den = ss2tf(self.A, self.B, self.C, self.D)
self._zeros, self._poles, self._gain = ss2zpk(self.A, self.B,
self.C, self.D)
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See scipy.signal.bode for details.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = s1.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See scipy.signal.freqresp for details.
"""
return freqresp(self, w=w, n=n)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
X0 :
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
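
Examples
--------
As a minimal sketch, simulate the response of the first-order lag
``H(s) = 1 / (s + 1)`` to a unit step input over five seconds:

>>> import numpy as np
>>> from scipy import signal
>>> system = signal.lti([1.0], [1.0, 1.0])
>>> T = np.linspace(0, 5.0, 101)
>>> U = np.ones_like(T)
>>> tout, yout, xout = signal.lsim(system, U, T)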
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0], 1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T), sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1] - T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti, diag(numpy.exp(dt * lam))), vt)
GT = _cast_to_array_dtype(GT, xout)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1, ATm1)
I = eye(A.shape[0], dtype=A.dtype)
GTmI = GT - I
F1T = dot(dot(BT, GTmI), ATm1)
if interp:
F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1)
for k in xrange(1, len(T)):
dt1 = T[k] - T[k - 1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti, diag(numpy.exp(dt * lam))), vt)
GT = _cast_to_array_dtype(GT, xout)
GTmI = GT - I
F1T = dot(dot(BT, GTmI), ATm1)
if interp:
F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1)
xout[k] = dot(xout[k - 1], GT) + dot(U[k - 1], F1T)
if interp:
xout[k] = xout[k] + dot((U[k] - U[k - 1]), F2T)
yout = (squeeze(dot(U, transpose(sys.D))) +
squeeze(dot(xout, transpose(sys.C))))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
h = zeros(T.shape, sys.A.dtype)
s, v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s * T[k]))
eA = dot(dot(v, es), vi)
eA = _cast_to_array_dtype(eA, h)
h[k] = squeeze(dot(dot(C, eA), B))
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t)
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
kwargs : various types
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
.. versionadded:: 0.11.0
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is calculated
for every value in this array. If not given a reasonable set will be
calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = signal.bode(s1)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Examples
--------
# Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([], [1, 1, 1], [5])
# transfer function: H(s) = 5 / (s-1)^3
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
return w, h
| mit |
pelagos/paparazzi | sw/airborne/test/ahrs/ahrs_utils.py | 86 | 4923 | #! /usr/bin/env python
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
print("\nBuilding ahrs")
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_" + ahrs_type] + build_opt
#print(args)
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print(" # " + i, end=' ')
print()
print("Running simulation")
print(" using traj " + str(traj_nb))
p = subprocess.Popen(args=["./run_ahrs_on_synth", str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print(" "+i, end=' ')
# print("\n")
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')]
mydescr = np.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print(" " + line, end=' ')
else:
fields = line.strip().split(' ')
#print(fields)
for i, number in enumerate(fields):
data[i].append(number)
print()
for i in xrange(len(mydescr)):
data[i] = np.cast[mydescr[i]](data[i])
return np.rec.array(data, dtype=mydescr)
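# The record array returned above exposes each column by name (for example
# sim_res.time, sim_res.phi_true and sim_res.phi_ahrs), which is how
# plot_simulation_results() below indexes the data.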
def plot_simulation_results(plot_true_state, lsty, label, sim_res):
print("Plotting Results")
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=label)
plt.ylabel('degres')
plt.title('phi')
plt.legend()
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_ahrs, lsty)
plt.title('theta')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_ahrs, lsty)
plt.title('psi')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
plt.ylabel('degres/s')
plt.title('p')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
plt.title('q')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
plt.title('r')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
plt.ylabel('degres/s')
plt.xlabel('time in s')
plt.title('bp')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
plt.xlabel('time in s')
plt.title('bq')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
plt.xlabel('time in s')
plt.title('br')
if plot_true_state:
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_true, 'r--')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_true, 'r--')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_true, 'r--')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_true, 'r--')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_true, 'r--')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_true, 'r--')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_true, 'r--')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show()
| gpl-2.0 |
zimingd/synapsePythonClient | tests/load/test_speed_upload.py | 1 | 1365 | import synapseclient
from synapseclient import Project, File
import synapseclient.utils as utils
from datetime import datetime
import filecmp
import os, traceback
import argparse
import random
from synapseclient.utils import MB, GB
syn = None
def setup(module):
module.syn = synapseclient.Synapse()
module.syn.login()
def test_upload_speed(uploadSize=60 + 777771, threadCount=5):
import time
fh = None
filepath = utils.make_bogus_binary_file(uploadSize*MB)
try:
t0 = time.time()
fh = syn._uploadToFileHandleService(filepath, threadCount=threadCount)
dt = time.time()-t0
finally:
try:
os.remove(filepath)
except Exception:
print(traceback.format_exc())
if fh:
syn._deleteFileHandle(fh)
return dt
def main():
import pandas as pd
import numpy as np
global syn
syn = synapseclient.Synapse()
syn.login(silent=True)
sizes = [1,5,10,100,500,1000]
threads = [1,2,4,6,8,16]
results = pd.DataFrame(np.zeros((len(sizes), len(threads))), columns=threads, index=sizes)
results.index.name = 'Size (Mb)'
for size in sizes:
for thread in threads:
results.ix[size,thread] = test_upload_speed(size, thread)
print(results)
print()
if __name__ == "__main__":
main()
| apache-2.0 |
aavanian/bokeh | bokeh/sampledata/airports.py | 5 | 2832 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' The data in airports.json is a subset of US airports with field
elevations > 1500 meters. The query result was taken from
.. code-block:: none
http://services.nationalmap.gov/arcgis/rest/services/GlobalMap/GlobalMapWFS/MapServer/10/query
on October 15, 2015.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import json
# External imports
# Bokeh imports
from ..util.dependencies import import_required
from ..util.sampledata import external_path
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
'''
'''
pd = import_required('pandas', 'airports sample data requires Pandas (http://pandas.pydata.org) to be installed')
with open(external_path('airports.json'), 'r') as f:
content = f.read()
airports = json.loads(content)
schema = [['attributes', 'nam'], ['attributes', 'zv3'], ['geometry', 'x'], ['geometry', 'y']]
data = pd.io.json.json_normalize(airports['features'], meta=schema)
data.rename(columns={'attributes.nam': 'name', 'attributes.zv3': 'elevation'}, inplace=True)
data.rename(columns={'geometry.x': 'x', 'geometry.y': 'y'}, inplace=True)
return data
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
data = _read_data()
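# ``data`` is a pandas DataFrame with one row per airport, including the
# columns ``name``, ``elevation``, ``x`` and ``y`` built by _read_data() above.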
| bsd-3-clause |
scotthartbti/android_external_chromium_org | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
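# For the example dump .txt contents listed above, ReadDumpTxtFile() returns
# {'plat': 'Win32', 'prod': 'Chromium', 'ptype': 'nacl-loader', 'rept': 'crash svc'}.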
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
benwilliams337/cs470 | potential/show_field.py | 9 | 2996 | #!/usr/bin/env python
"""
README
This should get some of the boilerplate out of the way for you in visualizing
your potential fields.
Notably absent from this code is anything dealing with vectors, the
interaction of mutliple fields, etc. Your code will be a lot easier if you
define your potential fields in terms of vectors (angle, magnitude). For
plotting, however, you'll need to turn them back into dx and dy.
"""
import matplotlib.pyplot as plt
from pylab import *
import math
import random
from math import atan2, cos, sin, sqrt, pi
##### PLOTTING FUNCTIONS #####
def show_obstacle(plot, points):
"""Draw a polygon. Points is a list if [x,y] tuples
"""
for p1, p2 in zip(points, [points[-1]] + list(points)):
plot.plot([p1[0], p2[0]], [p1[1], p2[1]], 'b')
def show_arrows(plot, potential_func, xlim=(-400, 400), ylim=(-400, 400), res=20):
"""
Arguments:
        potential_func: a potential field function with signature (x, y, res)
xlim, ylim: the limits of the plot
res: resolution for (spacing between) arrows
"""
plot.set_xlim(xlim)
plot.set_ylim(ylim)
for x in range(xlim[0], xlim[1] + res, res):
for y in range(ylim[0], ylim[1] + res, res):
dx, dy = potential_func(x, y, res)
if dx + dy == 0: continue
plot.arrow(x, y, dx, dy, head_width=res/7.0, color='red', linewidth=.3)
def plot_single(potential_func, obstacles, filename, xlim=(-400, 400), ylim=(-400, 400)):
"""Plot a potential function and some obstacles, and write the resulting
image to a file"""
print "Generating", filename
fig = plt.figure()
plot = plt.subplot(111)
show_arrows(plot, potential_func, xlim=xlim, ylim=ylim)
for obstacle in obstacles:
show_obstacle(plot, obstacle)
fig.savefig(filename, format='png')
#### TRIVIAL EXAMPLE FUNCTIONS ####
def random_field(x, y, res):
"""
NOTE: Your potential field calculator should probably work in vectors
(angle, magnitude), but you need to return dx, dy for plotting.
Arguments:
x: the x position for which to calculate the potential field
y: the y position for which to calculate the potential field
res: current plotting resolution (helpful for scaling down your
vectors for display, so they don't all overlap each other)
Returns:
dx, dy: the change in x and y for the arrow to point.
"""
return random.randint(-res, res), random.randint(-res, res)
def unidirectional(x, y, res):
"""Another simple example"""
return res, res/2
def bidirectional(x, y, res):
if x > 0:
return res, res/2
else:
return -res, res/2
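# A sketch of the (angle, magnitude) workflow suggested in the module docstring.
# This function and its goal point are illustrative only, not part of the assignment code;
# it could be plotted with e.g. plot_single(attractive_example, [triangle], 'attractive.png').
def attractive_example(x, y, res, goal=(0, 0)):
    """Pull arrows toward a hypothetical goal point: compute an (angle, magnitude)
    vector, then convert it back to (dx, dy) for plotting."""
    angle = atan2(goal[1] - y, goal[0] - x)
    magnitude = min(res, 0.05 * sqrt((goal[0] - x) ** 2 + (goal[1] - y) ** 2))
    return magnitude * cos(angle), magnitude * sin(angle)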
def main():
triangle = ((0, 0), (100, 100), (-100, 50))
plot_single(random_field, [triangle], 'random.png')
plot_single(unidirectional, [triangle], 'unidirectional.png')
plot_single(bidirectional, [triangle], 'bidirectional.png')
if __name__ == '__main__':
main()
# vim: et sw=4 sts=4
| gpl-3.0 |
iamaris/anaMini | sthist.py | 1 | 1870 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#f = open('st.txt','r')
#print f.readlines()
#f.close()
st = []
x1 = []
x2 = []
x3 = []
x4 = []
x5 = []
x6 = []
x7 = []
x8 = []
a = np.genfromtxt("st.txt")
for i in a:
st.append(i[0])
x1.append(i[1])
x2.append(i[2])
x3.append(i[3])
x4.append(i[4])
x5.append(i[5])
x6.append(i[6])
x7.append(i[7])
x8.append(i[8])
s1 = np.array(x1)
s2 = np.array(x2)
s3 = np.array(x3)
s4 = np.array(x4)
s5 = np.array(x5)
s6 = np.array(x6)
s7 = np.array(x7)
s8 = np.array(x8)
s23 = s2+s3
y4 = np.divide(s4,s23)
y5 = np.divide(s5,s23)
y6 = np.divide(s6,s23)
y7 = np.divide(s7,s23)
y8 = np.divide(s8,s23)
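# e4..e8 are the fractional (relative) uncertainties of the ratios above, propagating
# Poisson counting errors for each column: sqrt((sqrt(n)/n)**2 + (sqrt(n23)/n23)**2),
# which simplifies to sqrt(1/n + 1/n23).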
e4 = np.sqrt(np.divide(np.sqrt(s4),s4)*np.divide(np.sqrt(s4),s4)+np.divide(np.sqrt(s23),s23)*np.divide(np.sqrt(s23),s23))
e5 = np.sqrt(np.divide(np.sqrt(s5),s5)*np.divide(np.sqrt(s5),s5)+np.divide(np.sqrt(s23),s23)*np.divide(np.sqrt(s23),s23))
e6 = np.sqrt(np.divide(np.sqrt(s6),s6)*np.divide(np.sqrt(s6),s6)+np.divide(np.sqrt(s23),s23)*np.divide(np.sqrt(s23),s23))
e7 = np.sqrt(np.divide(np.sqrt(s7),s7)*np.divide(np.sqrt(s7),s7)+np.divide(np.sqrt(s23),s23)*np.divide(np.sqrt(s23),s23))
e8 = np.sqrt(np.divide(np.sqrt(s8),s8)*np.divide(np.sqrt(s8),s8)+np.divide(np.sqrt(s23),s23)*np.divide(np.sqrt(s23),s23))
#z23 = plt.errorbar(st, s23, yerr=np.sqrt(s23),fmt='o')
#z4 = plt.errorbar(st, y4, yerr=e4,fmt='o')
#z5 = plt.errorbar(st, y5, yerr=e5,fmt='o')
#z6 = plt.errorbar(st, y6, yerr=e6,fmt='o')
#z7 = plt.errorbar(st, y7, yerr=e7,fmt='o')
##z8 = plt.errorbar(st, y8, yerr=e8,fmt='o')
plt.errorbar(st, y4, fmt='o')
plt.errorbar(st, y5, fmt='o')
plt.errorbar(st, y6, fmt='o')
plt.errorbar(st, y7, fmt='o')
plt.errorbar(st, y8, fmt='o')
plt.xlabel('St')
plt.ylabel('Ratio to (x2 + x3)')
plt.title('St')
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.show()
| mit |
manashmndl/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares loss function. The penalty `shrinks` the
values of the regression coefficients.
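In scikit-learn terms, Ridge minimizes ||y - Xw||^2_2 + alpha * ||w||^2_2
(the example below uses alpha=0.1); larger alpha means stronger shrinkage.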
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
liyu1990/sklearn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0  # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
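# sparsify() converts the fitted coef_ of the L1-penalised model into a scipy.sparse
# matrix, so the zeroed coefficients are skipped in the dot products inside predict().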
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/svm/tests/test_sparse.py | 32 | 12988 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function
    # Sanity check: test that decision_function implemented in Python
    # returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
FRESNA/PyPSA | examples/new_components/component_library.py | 1 | 5522 |
#Create a library with its own Network class that has a new CHP
#component and a new LOPF function for the CHP constraints
#NB: This only works with Python 3 because of super()
import pypsa, pandas as pd, numpy as np
from pypsa.descriptors import Dict
from pyomo.environ import Constraint
override_components = pypsa.components.components.copy()
override_components.loc["ShadowPrice"] = ["shadow_prices","Shadow price for a global constraint.",np.nan]
override_components.loc["CHP"] = ["chps","Combined heat and power plant.",np.nan]
override_component_attrs = Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()})
override_component_attrs["ShadowPrice"] = pd.DataFrame(columns = ["type","unit","default","description","status"])
override_component_attrs["ShadowPrice"].loc["name"] = ["string","n/a","n/a","Unique name","Input (required)"]
override_component_attrs["ShadowPrice"].loc["value"] = ["float","n/a",0.,"shadow value","Output"]
override_component_attrs["CHP"] = pd.DataFrame(columns = ["type","unit","default","description","status"])
override_component_attrs["CHP"].loc["name"] = ["string","n/a","n/a","Unique name","Input (required)"]
override_component_attrs["CHP"].loc["bus_fuel"] = ["string","n/a","n/a","Name of bus where fuel source is.","Input (required)"]
override_component_attrs["CHP"].loc["bus_elec"] = ["string","n/a","n/a","Name of bus where electricity is supplied.","Input (required)"]
override_component_attrs["CHP"].loc["bus_heat"] = ["string","n/a","n/a","Name of bus where heat is supplied.","Input (required)"]
override_component_attrs["CHP"].loc["p_nom_extendable"] = ["boolean","n/a",False,"","Input (optional)"]
override_component_attrs["CHP"].loc["capital_cost"] = ["float","EUR/MW",0.,"Capital cost per rating of electricity output.","Input (optional)"]
override_component_attrs["CHP"].loc["eta_elec"] = ["float","n/a",1.,"Electrical efficiency with no heat output, i.e. in condensing mode","Input (optional)"]
override_component_attrs["CHP"].loc["c_v"] = ["float","n/a",1.,"Loss of fuel for each addition of heat","Input (optional)"]
override_component_attrs["CHP"].loc["c_m"] = ["float","n/a",1.,"Backpressure ratio","Input (optional)"]
override_component_attrs["CHP"].loc["p_nom_ratio"] = ["float","n/a",1.,"Ratio of max heat output to max electrical output; max heat of 500 MWth and max electricity of 1000 MWth means p_nom_ratio is 0.5","Input (optional)"]
class Network(pypsa.Network):
def __init__(self,*args,**kwargs):
kwargs["override_components"]=override_components
kwargs["override_component_attrs"]=override_component_attrs
super().__init__(*args,**kwargs)
def lopf(self,*args,**kwargs):
#at this point check that all the extra links are in place for the CHPs
if not self.chps.empty:
self.madd("Link",
self.chps.index + " electric",
bus0=self.chps.bus_source.values,
bus1=self.chps.bus_elec.values,
p_nom_extendable=self.chps.p_nom_extendable.values,
capital_cost=self.chps.capital_cost.values*self.chps.eta_elec.values,
efficiency=self.chps.eta_elec.values)
self.madd("Link",
self.chps.index + " heat",
bus0=self.chps.bus_source.values,
bus1=self.chps.bus_heat.values,
p_nom_extendable=self.chps.p_nom_extendable.values,
efficiency=self.chps.eta_elec.values/self.chps.c_v.values)
if "extra_functionality" in kwargs:
user_extra_func = kwargs.pop('extra_functionality')
else:
user_extra_func = None
#the following function should add to any extra_functionality in kwargs
def extra_func(network, snapshots):
#at this point add the constraints for the CHPs
if not network.chps.empty:
print("Setting up CHPs:",network.chps.index)
def chp_nom(model, chp):
return network.chps.at[chp,"eta_elec"]*network.chps.at[chp,'p_nom_ratio']*model.link_p_nom[chp + " electric"] == network.chps.at[chp,"eta_elec"]/network.chps.at[chp,"c_v"]*model.link_p_nom[chp + " heat"]
network.model.chp_nom = Constraint(list(network.chps.index),rule=chp_nom)
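                # backpressure: in every snapshot the electricity output
                # (eta_elec * link_p of the electric link) must be at least c_m times the
                # heat output ((eta_elec / c_v) * link_p of the heat link).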
def backpressure(model,chp,snapshot):
return network.chps.at[chp,'c_m']*network.chps.at[chp,"eta_elec"]/network.chps.at[chp,"c_v"]*model.link_p[chp + " heat",snapshot] <= network.chps.at[chp,"eta_elec"]*model.link_p[chp + " electric",snapshot]
network.model.backpressure = Constraint(list(network.chps.index),list(snapshots),rule=backpressure)
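                # top_iso_fuel_line: the combined fuel drawn by the heat and electric links
                # may not exceed the nominal fuel intake of the electric link, keeping
                # operation below the iso-fuel line.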
def top_iso_fuel_line(model,chp,snapshot):
return model.link_p[chp + " heat",snapshot] + model.link_p[chp + " electric",snapshot] <= model.link_p_nom[chp + " electric"]
network.model.top_iso_fuel_line = Constraint(list(network.chps.index),list(snapshots),rule=top_iso_fuel_line)
if user_extra_func is not None:
print("Now doing user defined extra functionality")
user_extra_func(network,snapshots)
kwargs["extra_functionality"]=extra_func
super().lopf(*args,**kwargs)
#Afterwards you can process the outputs, e.g. into network.chps_t.p_out
#You could also delete the auxiliary links created above
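# A minimal usage sketch (illustrative only -- the bus names and CHP parameter values
# below are made up, and loads/generators would still be needed for a meaningful LOPF):
#
#     import component_library
#     network = component_library.Network()
#     for b in ("gas", "electricity", "heat"):
#         network.add("Bus", b)
#     network.add("CHP", "my chp", bus_source="gas", bus_elec="electricity",
#                 bus_heat="heat", p_nom_extendable=True, eta_elec=0.468,
#                 c_v=0.15, c_m=0.75, p_nom_ratio=1.)
#     network.lopf(network.snapshots)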
| gpl-3.0 |
moonbury/pythonanywhere | MasteringMLWithScikit-learn/8365OS_07_Codes/pca-3d-plot.py | 3 | 1392 | import matplotlib
matplotlib.use('Qt4Agg')
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = pl.figure(1, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
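# X now holds the 150 iris samples projected onto the first three principal components.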
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=pl.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
pl.show() | gpl-3.0 |
afgaron/rgz-analysis | python/test_consensus.py | 2 | 15556 | from __future__ import division
# Local RGZ modules
import collinearity
from load_contours import get_contours,make_pathdict
# Default packages
import datetime
import operator
from collections import Counter
import cStringIO
import urllib
import json
import os.path
import time
import shutil
# Other packages
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
from matplotlib.path import Path
import matplotlib.patches as patches
from scipy.ndimage.filters import maximum_filter
from scipy import stats
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from scipy.linalg.basic import LinAlgError
from astropy.io import fits
from astropy import wcs
from pymongo import MongoClient
from PIL import Image
# MongoDB parameters
client = MongoClient('localhost', 27017)
db = client['radio']
subjects = db['radio_subjects'] # subjects = images
classifications = db['radio_classifications'] # classifications = classifications of each subject per user
# General variables for the RGZ sample
main_release_date = datetime.datetime(2013, 12, 17, 0, 0, 0, 0)
IMG_HEIGHT_OLD = 424.0 # number of pixels in the original JPG image along the y axis
IMG_WIDTH_OLD = 424.0 # number of pixels in the original JPG image along the x axis
IMG_HEIGHT_NEW = 500.0 # number of pixels in the downloaded JPG image along the y axis
IMG_WIDTH_NEW = 500.0 # number of pixels in the downloaded JPG image along the x axis
FITS_HEIGHT = 301.0 # number of pixels in the FITS image (?) along the y axis
FITS_WIDTH = 301.0 # number of pixels in the FITS image (?) along the x axis
FIRST_FITS_HEIGHT = 132.0 # number of pixels in the FITS image along the y axis
FIRST_FITS_WIDTH = 132.0 # number of pixels in the FITS image along the y axis
# Need to add parameters for ATLAS, both IR and radio.
PIXEL_SIZE = 0.00016667#/3600.0 # the number of arcseconds per pixel in the FITS image
xmin = 1.
xmax = IMG_HEIGHT_NEW
ymin = 1.
ymax = IMG_WIDTH_NEW
bad_keys = ('finished_at','started_at','user_agent','lang','pending')
expert_names = [u'42jkb', u'ivywong', u'stasmanian', u'klmasters', u'Kevin', u'akapinska', u'enno.middelberg', u'xDocR', u'vrooje', u'KWillett', u'DocR']
# Paths
rgz_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
pathdict = make_pathdict()
# Find the consensus classification for a single subject
@profile
def checksum(zid='ARG000255x',experts_only=False,excluded=[],no_anonymous=False,write_peak_data=False):
# Find the consensus for all users who have classified a particular galaxy
sub = subjects.find_one({'zooniverse_id':zid})
imgid = sub['_id']
# Classifications for this subject after launch date
class_params = {"subject_ids": imgid, "updated_at": {"$gt": main_release_date}}
# Only get the consensus classification for the science team members
if experts_only:
class_params['expert'] = True
# If comparing a particular volunteer (such as an expert), don't include self-comparison
if len(excluded) > 0:
class_params['user_name'] = {"$nin":excluded}
'''
# To exclude the experts:
class_params['expert'] = {"$exists":False}
'''
# To exclude anonymous classifications (registered users only):
if no_anonymous:
if class_params.has_key('user_name'):
class_params['user_name']["$exists"] = True
else:
class_params['user_name'] = {"$exists":True}
_c = classifications.find(class_params)
#clist_all = list(classifications.find(class_params))
# Empty dicts and lists
cdict = {}
checksum_list = []
unique_users = set()
#clen_start = len(clist_all)
clen_start = 0
listcount = []
# Compute the most popular combination for each NUMBER of galaxies identified in image
clist_all = []
#for c in clist_all:
for c in _c:
clist_all.append(c)
clen_start += 1
# Skip classification if they already did one?
try:
user_name = c['user_name']
except KeyError:
user_name = 'Anonymous'
        if user_name not in unique_users or user_name == 'Anonymous':
unique_users.add(user_name)
listcount.append(True)
sumlist = [] # List of the checksums over all possible combinations
# Only find data that was an actual marking, not metadata
goodann = [x for x in c['annotations'] if (x.keys()[0] not in bad_keys)]
n_galaxies = len(goodann)
if n_galaxies > 0: # There must be at least one galaxy!
for idx,ann in enumerate(goodann):
xmaxlist = []
try:
radio_comps = ann['radio']
# loop over all the radio components within an galaxy
if radio_comps != 'No Contours':
for rc in radio_comps:
xmaxlist.append(float(radio_comps[rc]['xmax']))
# or make the value -99 if there are no contours
else:
xmaxlist.append(-99)
except KeyError:
xmaxlist.append(-99)
# To create a unique ID for the combination of radio components,
# take the product of all the xmax coordinates and sum them together.
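                    # (Illustration with made-up values: components with xmax 101.5 and
                    # 55.25 give product 5607.875; the per-galaxy products are then summed
                    # into the classification checksum below.)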
product = reduce(operator.mul, xmaxlist, 1)
sumlist.append(round(product,3))
checksum = sum(sumlist)
else:
checksum = -99
checksum_list.append(checksum)
c['checksum'] = checksum
# Insert checksum into dictionary with number of galaxies as the index
if cdict.has_key(n_galaxies):
cdict[n_galaxies].append(checksum)
else:
cdict[n_galaxies] = [checksum]
else:
listcount.append(False)
checksum_list.append(-99)
#print 'Removing classification for %s' % user_name
# Remove duplicates and classifications for no object
#clist = [c for lc,c in zip(listcount,checksum_list) if lc and c != -99]
clist = [c for lc,c in zip(listcount,clist_all) if lc and c['checksum'] != -99]
clen_diff = clen_start - len(clist)
'''
if clen_diff > 0:
print '\nSkipping %i duplicated classifications for %s. %i good classifications total.' % (clen_diff,zid,len(clist))
'''
maxval=0
mc_checksum = 0.
# Find the number of galaxies that has the highest number of consensus classifications
for k,v in cdict.iteritems():
mc = Counter(v).most_common()
# Check if the most common selection coordinate was for no radio contours
if mc[0][0] == -99.0:
if len(mc) > 1:
# If so, take the selection with the next-highest number of counts
mc_best = mc[1]
else:
continue
# Selection with the highest number of counts
else:
mc_best = mc[0]
# If the new selection has more counts than the previous one, choose it as the best match;
# if tied or less than this, remain with the current consensus number of galaxies
if mc_best[1] > maxval:
maxval = mc_best[1]
mc_checksum = mc_best[0]
# Find a galaxy that matches the checksum (easier to keep track as a list)
try:
cmatch = next(i for i in clist if i['checksum'] == mc_checksum)
except StopIteration:
# Necessary for objects like ARG0003par; one classifier recorded 22 "No IR","No Contours" in a short space. Still shouldn't happen.
print 'No non-zero classifications recorded for %s' % zid
return None
'''
try:
index = clist.index(mc_checksum)
cmatch = _c[index]
except ValueError:
# Necessary for objects like ARG0003par; one classifier recorded 22 "No IR","No Contours" in a short space. Still shouldn't happen.
print 'No non-zero classifications recorded for %s' % zid
return None
'''
# Find IR peak for the checksummed galaxies
goodann = [x for x in cmatch['annotations'] if x.keys()[0] not in bad_keys]
# Find the sum of the xmax coordinates for each galaxy. This gives the index to search on.
cons = {}
cons['zid'] = zid
cons['source'] = sub['metadata']['source']
ir_x,ir_y = {},{}
cons['answer'] = {}
cons['n_users'] = maxval
cons['n_total'] = len(clist)
answer = cons['answer']
for k,gal in enumerate(goodann):
xmax_temp = []
bbox_temp = []
try:
for v in gal['radio'].itervalues():
xmax_temp.append(float(v['xmax']))
bbox_temp.append((v['xmax'],v['ymax'],v['xmin'],v['ymin']))
checksum2 = round(sum(xmax_temp),3)
answer[checksum2] = {}
answer[checksum2]['ind'] = k
answer[checksum2]['xmax'] = xmax_temp
answer[checksum2]['bbox'] = bbox_temp
except KeyError:
print gal, zid
except AttributeError:
print 'No Sources, No IR recorded for %s' % zid
# Make empty copy of next dict in same loop
ir_x[k] = []
ir_y[k] = []
# Now loop over all sets of classifications to get the IR counterparts
for c in clist:
if c['checksum'] == mc_checksum:
annlist = [ann for ann in c['annotations'] if ann.keys()[0] not in bad_keys]
for ann in annlist:
if 'ir' in ann.keys():
# Find the index k that this corresponds to
try:
xmax_checksum = round(sum([float(ann['radio'][a]['xmax']) for a in ann['radio']]),3)
except TypeError:
xmax_checksum = -99
try:
k = answer[xmax_checksum]['ind']
if ann['ir'] == 'No Sources':
ir_x[k].append(-99)
ir_y[k].append(-99)
else:
# Only takes the first IR source right now; NEEDS TO BE MODIFIED.
ir_x[k].append(float(ann['ir']['0']['x']))
ir_y[k].append(float(ann['ir']['0']['y']))
except KeyError:
print '"No radio" still appearing as valid consensus option.'
# Perform a kernel density estimate on the data for each galaxy
scale_ir = IMG_HEIGHT_NEW/IMG_HEIGHT_OLD
peak_data = []
# Remove empty IR peaks if they exist
for (xk,xv),(yk,yv) in zip(ir_x.iteritems(),ir_y.iteritems()):
if len(xv) == 0:
ir_x.pop(xk)
if len(yv) == 0:
ir_y.pop(yk)
assert len(ir_x) == len(ir_y),'Lengths of ir_x (%i) and ir_y (%i) are not the same' % (len(ir_x),len(ir_y))
for (xk,xv),(yk,yv) in zip(ir_x.iteritems(),ir_y.iteritems()):
        if len(xv) == 0:
            # no IR clicks were recorded for this galaxy (already filtered out above)
            continue
pd = {}
x_exists = [xt * scale_ir for xt in xv if xt != -99.0]
y_exists = [yt * scale_ir for yt in yv if yt != -99.0]
x_all = [xt * scale_ir for xt in xv]
y_all = [yt * scale_ir for yt in yv]
coords_all = [(xx,yy) for xx,yy in zip(x_all,y_all)]
ir_Counter = Counter(coords_all)
most_common_ir = ir_Counter.most_common(1)[0][0]
if len(Counter(x_exists)) > 2 and len(Counter(y_exists)) > 2 and most_common_ir != (-99,-99):
# X,Y = grid of uniform coordinates over the IR pixel plane
X, Y = np.mgrid[xmin:xmax, ymin:ymax]
positions = np.vstack([X.ravel(), Y.ravel()])
try:
values = np.vstack([x_exists, y_exists])
except ValueError:
# Breaks on the tutorial subject. Find out why len(x) != len(y)
print zid
print 'Length of IR x array: %i; Length of IR y array: %i' % (len(x_exists),len(y_exists))
try:
kernel = stats.gaussian_kde(values)
except LinAlgError:
print 'LinAlgError in KD estimation for %s' % zid,x_exists,y_exists
continue
# Even if there are more than 2 sets of points, if they are mutually co-linear,
# matrix can't invert and kernel returns NaNs.
kp = kernel(positions)
if np.isnan(kp).sum() > 0:
acp = collinearity.collinear(x_exists,y_exists)
if len(acp) > 0:
print 'There are %i unique points for %s (source no. %i in the field), but all are co-linear; KDE estimate does not work.' % (len(Counter(x_exists)),zid,xk)
else:
print 'There are NaNs in the KDE for %s (source no. %i in the field), but points are not co-linear.' % (zid,xk)
for k,v in answer.iteritems():
if v['ind'] == xk:
answer[k]['ir'] = (np.mean(x_exists),np.mean(y_exists))
else:
Z = np.reshape(kp.T, X.shape)
# Find the number of peaks
# http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
neighborhood = np.ones((10,10))
local_max = maximum_filter(Z, footprint=neighborhood)==Z
background = (Z==0)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
detected_peaks = local_max ^ eroded_background
npeaks = detected_peaks.sum()
#return X,Y,Z,npeaks
pd['X'] = X
pd['Y'] = Y
pd['Z'] = Z
pd['npeaks'] = npeaks
try:
xpeak = float(pd['X'][pd['Z']==pd['Z'].max()][0])
ypeak = float(pd['Y'][pd['Z']==pd['Z'].max()][0])
except IndexError:
print pd
print zid, clist
for k,v in answer.iteritems():
if v['ind'] == xk:
answer[k]['ir_peak'] = (xpeak,ypeak)
# Don't write to consensus for serializable JSON object
if write_peak_data:
answer[k]['peak_data'] = pd
answer[k]['ir_x'] = x_exists
answer[k]['ir_y'] = y_exists
else:
# Note: need to actually put a limit in if less than half of users selected IR counterpart.
            # Right now it still IDs a source even if only 1/10 users said it was there.
for k,v in answer.iteritems():
if v['ind'] == xk:
# Case 1: multiple users selected IR source, but not enough unique points to pinpoint peak
if most_common_ir != (-99,-99) and len(x_exists) > 0 and len(y_exists) > 0:
answer[k]['ir'] = (x_exists[0],y_exists[0])
# Case 2: most users have selected No Sources
else:
answer[k]['ir'] = (-99,-99)
return cons
if __name__ == "__main__":
checksum()
| mit |
EtienneCmb/tensorpac | paper/reviews/code/r3_pactools_vs_tensorpac.py | 1 | 5037 | """Comparison between pactools and tensorpac."""
import json
with open("../../paper.json", 'r') as f: cfg = json.load(f) # noqa
import pandas as pd
import numpy as np
from time import time as tst
from tensorpac.signals import pac_signals_wavelet
from tensorpac import Pac
from pactools import Comodulogram, simulate_pac
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
plt.style.use('seaborn-poster')
sns.set_style("white")
plt.rc('font', family=cfg["font"])
###############################################################################
# simulated data parameters
sf = 512.
n_epochs = 20
n_times = 4000
n_perm = 20
# frequency vectors resolutions
n_pha = 50
n_amp = 40
# method correspondance between pactools and tensorpac
METH = dict(
PACTOOLS=dict(MVL='canolty', MI='tort', PLV='penny'),
TENSORPAC=dict(MVL=1, MI=2, PLV=5)
)
###############################################################################
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# SIMULATE PAC + FREQUENCY VECTORS
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
data, time = pac_signals_wavelet(sf=sf, f_pha=10, f_amp=100, noise=.8,
n_epochs=n_epochs, n_times=n_times)
# construct frequency vectors that fit both tensorpac and pactools
f_pha_pt_width = 1.
f_pha_pt = np.linspace(3, 20, n_pha)
f_pha_tp = np.c_[f_pha_pt - f_pha_pt_width / 2, f_pha_pt + f_pha_pt_width / 2]
f_amp_pt_width = max(f_pha_pt) * 2  # pactools recommendation
f_amp_pt = np.linspace(60, 140, n_amp)
f_amp_tp = np.c_[f_amp_pt - f_amp_pt_width / 2, f_amp_pt + f_amp_pt_width / 2]
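# pactools defines each frequency bin by (center, width) while tensorpac expects explicit
# [low, high] band edges, so the *_tp arrays rebuild the same bands from the pactools
# centers and widths to keep the two comodulograms directly comparable.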
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# COMPUTING FUNCTION
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def compute_pt_tp(n_jobs, n_perm, title=''):
"""Compute Pactools and Tensorpac."""
cpt, meth_comp, soft = [], [], []
for meth in ['MVL', 'MI', 'PLV']:
# ---------------------------------------------------------------------
# get the method name
meth_pt = METH['PACTOOLS'][meth]
meth_tp = METH['TENSORPAC'][meth]
if n_perm > 0:
idpac = (meth_tp, 2, 4)
else:
idpac = (meth_tp, 0, 0)
# ---------------------------------------------------------------------
# PACTOOLS
pt_start = tst()
estimator = Comodulogram(
fs=sf, low_fq_range=f_pha_pt, low_fq_width=f_pha_pt_width,
high_fq_range=f_amp_pt, high_fq_width=f_amp_pt_width,
method=meth_pt, progress_bar=False, n_jobs=n_jobs,
n_surrogates=n_perm)
estimator.fit(data)
pt_end = tst()
# ---------------------------------------------------------------------
# TENSORPAC
tp_start = tst()
p_obj = Pac(idpac=idpac, f_pha=f_pha_tp, f_amp=f_amp_tp,
verbose='error')
pac = p_obj.filterfit(sf, data, n_jobs=n_jobs, n_perm=n_perm).mean(-1)
tp_end = tst()
# ---------------------------------------------------------------------
        # shape checking
assert estimator.comod_.shape == pac.T.shape
# saving the results
cpt += [pt_end - pt_start, tp_end - tp_start]
meth_comp += [meth] * 2
soft += ['Pactools', 'Tensorpac']
# -------------------------------------------------------------------------
df = pd.DataFrame({"Computing time": cpt, "PAC method": meth_comp,
"Software": soft})
sns.barplot(x="PAC method", y="Computing time", hue="Software", data=df)
plt.title(title, fontsize=14, fontweight='bold')
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# RUN THE COMPARISON
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
plt.figure(figsize=(12, 10))
# single-core // no perm
plt.subplot(2, 2, 1)
compute_pt_tp(1, 0, title="Single-core // no surrogates")
plt.subplot(2, 2, 2)
compute_pt_tp(-1, 0, title="Multi-core // no surrogates")
plt.subplot(2, 2, 3)
compute_pt_tp(1, n_perm, title=f"Single-core // {n_perm} surrogates")
plt.subplot(2, 2, 4)
compute_pt_tp(-1, n_perm, title=f"Multi-core // {n_perm} surrogates")
plt.tight_layout()
plt.savefig(f"../figures/r3_pactools_vs_tensorpac.png", dpi=300,
bbox_inches='tight')
plt.show()
| bsd-3-clause |
saiwing-yeung/scikit-learn | benchmarks/bench_tree.py | 131 | 3647 | """
To run this, you'll need to have the following installed:
* scikit-learn
It runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import matplotlib.pyplot as plt
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
plt.figure('scikit-learn tree benchmark results')
plt.subplot(211)
plt.title('Learning with varying number of samples')
plt.plot(xx, scikit_classifier_results, 'g-', label='classification')
plt.plot(xx, scikit_regressor_results, 'r-', label='regression')
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
plt.subplot(212)
plt.title('Learning in high dimensional spaces')
plt.plot(xx, scikit_classifier_results, 'g-', label='classification')
plt.plot(xx, scikit_regressor_results, 'r-', label='regression')
plt.legend(loc='upper left')
plt.xlabel('number of dimensions')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
NMTHydro/Recharge | zobs/orecharge/Gauges/plot_q_snow_ppt.py | 1 | 3162 | import datetime
from dateutil import rrule
from matplotlib import pyplot as plt
import os
import numpy as np
folder = 'C:\\Users\David\\Documents\\Recharge\\Gauges\\Complete_q_ppt_HF_csv'
os.chdir(folder)
select_csv = "8269000_date_q_ppt.csv"
fid = open(select_csv)
lines = fid.readlines()[0:]
fid.close()
rows = [line.split(',') for line in lines]
recs = []
for line in rows:
recs.append([datetime.datetime.strptime(line[0], '%Y/%m/%d %H:%M'), # date
float(line[1]), float(line[2])]) # discharge
print "Data points: " + str(len(recs))
all_recs = np.array(recs)
# Bring in snow
folder = 'C:\\Users\\David\\Documents\\Recharge\\Point_data\\Taos'
os.chdir(folder)
select_csv = "Taos_Snow_2010_2013.csv"
fid = open(select_csv)
lines = fid.readlines()[1:]
fid.close()
rows = [line.split(',') for line in lines]
recs = []
for line in rows:
recs.append([datetime.datetime.strptime(line[1], '%m/%d/%Y'), # date
float(line[3])]) # discharge
print "Data points: " + str(len(recs))
snow = np.array(recs)
snow_dates = snow[:, 0]
snow_in = snow[:, 1]
in_mm = 25.4
snow_mm = np.multiply(snow_in, in_mm)
snow_mm = np.array(snow_mm, dtype=float)
def make_patch_spines_invisible(ax):
    ax.set_frame_on(True)
    ax.patch.set_visible(False)
    for sp in ax.spines.values():
        sp.set_visible(False)
fig, host = plt.subplots()
fig.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
par2.spines["right"].set_position(("axes", 1.2))
make_patch_spines_invisible(par2)
par2.spines["right"].set_visible(True)
p1, = host.plot(all_recs[:, 0], all_recs[:, 1], "Purple", label="Discharge")
p2, = par1.plot(all_recs[:, 0], all_recs[:, 2], "g", label="Precipitation")
p3, = par2.plot(snow[:, 0], snow[:, 1], "b", label="Snow Water Equivalent")
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
# par1.set_ylim(0, 4)
par2.set_ylim(70, 0)
host.set_xlabel("Date")
host.set_ylabel("Cubic Feet per Second")
par1.set_ylabel("Precipitation Total for Watershed [m^3]")
par2.set_ylabel("[mm]")
host.yaxis.label.set_color(p1.get_color())
par1.yaxis.label.set_color(p2.get_color())
par2.yaxis.label.set_color(p3.get_color())
tkw = dict(size=4, width=1.5)
host.tick_params(axis='y', colors=p1.get_color(), **tkw)
par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
host.tick_params(axis='x', **tkw)
lines = [p1, p2, p3]
host.legend(lines, [l.get_label() for l in lines], loc=2)
plt.show()
# fig, ax1 = plt.subplots(1, figsize=(15, 5))
# ax1.plot(all_recs[:, 0], all_recs[:, 1], '-r', label='Discharge (cfs)')
# ax1.set_ylabel('Discharge (cfs)', color='r')
# ax1.set_xlabel('Time')
# plt.legend()
# # plt.ylim(0.0,1.2)
# for tl in ax1.get_yticklabels():
# tl.set_color('r')
# ax2 = ax1.twinx()
# ax2.plot(all_recs[:, 0], all_recs[:, 2], '-g', label='Watershed Rainfall (daily, cubic meters)')
# ax2.set_ylabel('Precipitation', color='g')
# # plt.ylim(0.0, 1.0)
# for tl in ax2.get_yticklabels():
# tl.set_color('g')
# for tl in ax2.get_xticklabels():
# tl.set_color('k')
# plt.legend()
# plt.title('Pueblo de Taos River Discharge vs Watershed Rainfall')
# plt.show()
| apache-2.0 |
abhishekgahlot/scikit-learn | sklearn/datasets/tests/test_base.py | 39 | 5607 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
| bsd-3-clause |
BoltzmannBrain/nupic.research | projects/sequence_prediction/continuous_sequence/run_knn.py | 12 | 7104 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import math
import operator
from optparse import OptionParser
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
plt.ion()
def euclideanDistance(instance1, instance2, considerDimensions):
"""
Calculate Euclidean Distance between two samples
Example use:
data1 = [2, 2, 2, 'class_a']
data2 = [4, 4, 4, 'class_b']
  distance = euclideanDistance(data1, data2, range(3))
:param instance1: list of attributes
:param instance2: list of attributes
:param considerDimensions: a list of dimensions to consider
:return: float euclidean distance between data1 & 2
"""
distance = 0
for x in considerDimensions:
distance += pow((instance1[x] - instance2[x]), 2)
return math.sqrt(distance)
def getNeighbors(trainingSet, testInstance, k, considerDimensions=None):
"""
collect the k most similar instances in the trainingSet for a given test
instance
:param trainingSet: A list of data instances
:param testInstance: a single data instance
:param k: number of neighbors
:param considerDimensions: a list of dimensions to consider
:return: neighbors: a list of neighbor instance
"""
  if considerDimensions is None:
    # default to all feature dimensions (the last column holds the response)
    considerDimensions = range(len(testInstance) - 1)
neighborList = []
for x in range(len(trainingSet)):
dist = euclideanDistance(testInstance, trainingSet[x], considerDimensions)
neighborList.append((trainingSet[x], dist))
neighborList.sort(key=operator.itemgetter(1))
neighbors = []
distances = []
for x in range(k):
neighbors.append(neighborList[x][0])
distances.append(neighborList[x][1])
return neighbors
def getResponse(neighbors, weights=None):
"""
Calculated weighted response based on a list of nearest neighbors
:param neighbors: a list of neighbors, each entry is a data instance
:param weights: a numpy array of the same length as the neighbors
:return: weightedAvg: weighted average response
"""
neighborResponse = []
for x in range(len(neighbors)):
neighborResponse.append(neighbors[x][-1])
neighborResponse = np.array(neighborResponse).astype('float')
if weights is None:
weightedAvg = np.mean(neighborResponse)
else:
weightedAvg = np.sum(weights * neighborResponse)
return weightedAvg
def readDataSet(dataSet):
filePath = 'data/' + dataSet + '.csv'
if dataSet == 'nyc_taxi':
df = pd.read_csv(filePath, header=0, skiprows=[1, 2],
names=['time', 'data', 'timeofday', 'dayofweek'])
sequence = df['data']
dayofweek = df['dayofweek']
timeofday = df['timeofday']
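    # lag the series by 5 steps with np.roll so every sample also carries the
    # value observed 5 steps earlier (the first 5 entries wrap around from the end)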
sequence5stepsAgo = np.roll(np.array(sequence), 5)
seq = []
for i in xrange(len(sequence)):
seq.append(
[timeofday[i], dayofweek[i], sequence5stepsAgo[i], sequence[i]])
else:
    raise ValueError('unrecognized dataset type: ' + dataSet)
return seq
def _getArgs():
parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
"\n\nCompare TM performance with trivial predictor using "
"model outputs in prediction directory "
"and outputting results to result directory.")
parser.add_option("-d",
"--dataSet",
type=str,
default='nyc_taxi',
dest="dataSet",
help="DataSet Name, choose from sine, SantaFe_A, MackeyGlass")
parser.add_option("-n",
"--trainingDataSize",
type=int,
default=6000,
dest="trainingDataSize",
help="size of training dataset")
(options, remainder) = parser.parse_args()
print options
return options, remainder
def saveResultToFile(dataSet, predictedInput, algorithmName):
inputFileName = 'data/' + dataSet + '.csv'
inputFile = open(inputFileName, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
outputFileName = './prediction/' + dataSet + '_' + algorithmName + '_pred.csv'
outputFile = open(outputFileName, "w")
csvWriter = csv.writer(outputFile)
csvWriter.writerow(
['timestamp', 'data', 'prediction-' + str(predictionStep) + 'step'])
csvWriter.writerow(['datetime', 'float', 'float'])
csvWriter.writerow(['', '', ''])
for i in xrange(len(sequence)):
row = csvReader.next()
csvWriter.writerow([row[0], row[1], predictedInput[i]])
inputFile.close()
outputFile.close()
def normalizeSequence(sequence, considerDimensions=None):
"""
normalize sequence by subtracting the mean and
:param sequence: a list of data samples
:param considerDimensions: a list of dimensions to consider
:return: normalized sequence
"""
seq = np.array(sequence).astype('float64')
nSampleDim = seq.shape[1]
if considerDimensions is None:
considerDimensions = range(nSampleDim)
for dim in considerDimensions:
seq[:, dim] = (seq[:, dim] - np.mean(seq[:, dim])) / np.std(seq[:, dim])
sequence = seq.tolist()
return sequence
if __name__ == "__main__":
(_options, _args) = _getArgs()
dataSet = _options.dataSet
numTrain = _options.trainingDataSize
print "run knn on ", dataSet
sequence = readDataSet(dataSet)
# predict 5 steps ahead
predictionStep = 5
nFeature = 2
k = 10
sequence = normalizeSequence(sequence, considerDimensions=[0, 1, 2])
targetInput = np.zeros((len(sequence),))
predictedInput = np.zeros((len(sequence),))
for i in xrange(numTrain, len(sequence) - predictionStep):
testInstance = sequence[i + predictionStep]
targetInput[i] = testInstance[-1]
  # find the k nearest neighbors in the (timeofday, dayofweek, lagged value)
  # feature space, searching only the most recent numTrain samples
neighbors = getNeighbors(sequence[i - numTrain:i], testInstance, k, [0, 1, 2])
predictedInput[i] = getResponse(neighbors)
print "step %d, target input: %d predicted Input: %d " % (
i, targetInput[i], predictedInput[i])
saveResultToFile(dataSet, predictedInput, 'plainKNN')
plt.figure()
plt.plot(targetInput)
plt.plot(predictedInput)
plt.xlim([12800, 13500])
plt.ylim([0, 30000])
| agpl-3.0 |
RayMick/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
openmachinesblog/visualization-census-2013 | basic.py | 1 | 6264 | import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.patches import Polygon, PathPatch
from matplotlib.collections import PatchCollection
from mpl_toolkits.basemap import Basemap
import numpy as np
import io
import zipfile
import csv
import sys
def find_nearest_ind(array,value):
idx = (np.abs(array-value)).argmin()
return idx
# part of http://stackoverflow.com/a/17479417/2501747
def add_subplot_axes(ax,rect):
fig = plt.gcf()
box = ax.get_position()
width = box.width
height = box.height
inax_position = ax.transAxes.transform(rect[0:2])
transFigure = fig.transFigure.inverted()
infig_position = transFigure.transform(inax_position)
x = infig_position[0]
y = infig_position[1]
width *= rect[2]
height *= rect[3]
subax = fig.add_axes([x,y,width,height],frameon=False) # we don't need a frame
return subax
# state codes from http://www2.census.gov/programs-surveys/acs/tech_docs/pums/data_dict/PUMSDataDict13.txt
# note: District of Columbia and Puerto Rico are commented out; Alaska and
# Hawaii are kept and drawn in separate inset maps
state_codes = {'01': 'Alabama',
'02': 'Alaska',
'15': 'Hawaii',
'04': 'Arizona',
'05': 'Arkansas',
'06': 'California',
'08': 'Colorado',
'09': 'Connecticut',
'10': 'Delaware',
# '11': 'District of Columbia',
'12': 'Florida',
'13': 'Georgia',
'16': 'Idaho',
'17': 'Illinois',
'18': 'Indiana',
'19': 'Iowa',
'20': 'Kansas',
'21': 'Kentucky',
'22': 'Louisiana',
'23': 'Maine',
'24': 'Maryland',
'25': 'Massachusetts',
'26': 'Michigan',
'27': 'Minnesota',
'28': 'Mississippi',
'29': 'Missouri',
'30': 'Montana',
'31': 'Nebraska',
'32': 'Nevada',
'33': 'New Hampshire',
'34': 'New Jersey',
'35': 'New Mexico',
'36': 'New York',
'37': 'North Carolina',
'38': 'North Dakota',
'39': 'Ohio',
'40': 'Oklahoma',
'41': 'Oregon',
'42': 'Pennsylvania',
'44': 'Rhode Island',
'45': 'South Carolina',
'46': 'South Dakota',
'47': 'Tennessee',
'48': 'Texas',
'49': 'Utah',
'50': 'Vermont',
'51': 'Virginia',
'53': 'Washington',
'54': 'West Virginia',
'55': 'Wisconsin',
'56': 'Wyoming',
# '72': 'Puerto Rico'
}
colArg = sys.argv[1]
csvf = csv.reader(open('output-{0}.csv'.format(colArg), 'rb'))
header = csvf.next()
# row_count = sum(1 for row in csvf)
row_count = 1211264
"""
Generate the data structure
{state: {puma: []}}
"""
data = {}
for i in range(row_count):
row=csvf.next()
state=row[0]
puma=row[1]
col=int(row[2])
if (state not in data):
data.update({state: {puma: np.array([col])}})
elif (puma not in data[state]):
data[state].update({puma: np.array([col])})
else:
data[state][puma] = np.append(data[state][puma],col)
"""
Use three subplots (mainland,Hawaii,Alaska)
"""
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
rect = [0.08,0.05,0.35,0.35]
axAlaska = add_subplot_axes(ax,rect)
rect = [0.3,0.02,0.2,0.2]
axHawaii = add_subplot_axes(ax,rect)
fig.suptitle('Census 2013: Internet access', fontsize=20)
# create a map object with the Albers Equal Areas projection.
# This projection tends to look nice for the contiguous us.
mNormal = Basemap(width=5000000,height=3500000,
resolution='l',projection='aea',\
ax=ax, \
lon_0=-96,lat_0=38)
mAlaska = Basemap(width=5000000,height=3500000,
resolution='l',projection='aea',\
ax=axAlaska, \
lon_0=-155,lat_0=65)
mHawaii = Basemap(width=1000000,height=700000,
resolution='l',projection='aea',\
ax=axHawaii, \
lon_0=-157,lat_0=20)
# define a colorramp
num_colors = 21
cm = plt.get_cmap('RdYlGn')
colorGradient = [cm(1.*i/num_colors) for i in range(num_colors)]
# read each states shapefile
for key in state_codes.keys():
if (state_codes[key] == "Alaska"):
mAlaska.readshapefile('shapefiles/pums/tl_2013_{0}_puma10'.format(key),name='state', drawbounds=True)
m = mAlaska
elif (state_codes[key] == "Hawaii"):
mHawaii.readshapefile('shapefiles/pums/tl_2013_{0}_puma10'.format(key),name='state', drawbounds=True)
m = mHawaii
else:
mNormal.readshapefile('shapefiles/pums/tl_2013_{0}_puma10'.format(key),name='state', drawbounds=True)
m = mNormal
# loop through each PUMA and assign the correct color to its shape
for info, shape in zip(m.state_info, m.state):
dataForStPuma = data[key][info['PUMACE10']]
# get the percentage of households with Internet access
woAccess = (dataForStPuma == 3)
accessPerc = 1-(sum(woAccess)/(1.0*len(dataForStPuma)))
        # map the percentage onto the color ramp, clamping to the last bin so
        # that 100% access cannot index past the end of colorGradient
        colorInd = min(int(round(accessPerc*num_colors)), num_colors-1)
patches = [Polygon(np.array(shape), True)]
pc = PatchCollection(patches, edgecolor='k', linewidths=1., zorder=2)
pc.set_color(colorGradient[colorInd])
if (state_codes[key] == "Alaska"):
axAlaska.add_collection(pc)
elif (state_codes[key] == "Hawaii"):
axHawaii.add_collection(pc)
else:
ax.add_collection(pc)
# add colorbar legend
cmap = mpl.colors.ListedColormap(colorGradient)
# define the bins and normalize
bounds = np.linspace(0,100,num_colors)
# create a second axes for the colorbar
ax2 = fig.add_axes([0.82, 0.1, 0.03, 0.8])
cb = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, ticks=bounds, boundaries=bounds, format='%1i')
# vertically oriented colorbar
cb.ax.set_yticklabels([str(int(i))+"%" for i in bounds])
plt.savefig('map-{0}.png'.format(colArg))
| mit |
jmmease/pandas | pandas/tests/frame/test_quantile.py | 10 | 15893 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import numpy as np
from pandas import (DataFrame, Series, Timestamp, _np_version_under1p11)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameQuantile(TestData):
def test_quantile(self):
from numpy import percentile
q = self.tsframe.quantile(0.1, axis=0)
assert q['A'] == percentile(self.tsframe['A'], 10)
tm.assert_index_equal(q.index, self.tsframe.columns)
q = self.tsframe.quantile(0.9, axis=1)
assert (q['2000-01-17'] ==
percentile(self.tsframe.loc['2000-01-17'], 90))
tm.assert_index_equal(q.index, self.tsframe.index)
# test degenerate case
q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
assert(np.isnan(q['x']) and np.isnan(q['y']))
# non-numeric exclusion
df = DataFrame({'col1': ['A', 'A', 'B', 'B'], 'col2': [1, 2, 3, 4]})
rs = df.quantile(0.5)
xp = df.median().rename(0.5)
assert_series_equal(rs, xp)
# axis
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
result = df.quantile([.5, .75], axis=1)
expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
3: [3.5, 3.75]}, index=[0.5, 0.75])
assert_frame_equal(result, expected, check_index_type=True)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
df = DataFrame([[1, 2, 3],
['a', 'b', 4]])
result = df.quantile(.5, axis=1)
expected = Series([3., 4.], index=[0, 1], name=0.5)
assert_series_equal(result, expected)
def test_quantile_axis_mixed(self):
# mixed on axis=1
df = DataFrame({"A": [1, 2, 3],
"B": [2., 3., 4.],
"C": pd.date_range('20130101', periods=3),
"D": ['foo', 'bar', 'baz']})
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], name=0.5)
assert_series_equal(result, expected)
# must raise
def f():
df.quantile(.5, axis=1, numeric_only=False)
pytest.raises(TypeError, f)
def test_quantile_axis_parameter(self):
# GH 9543/9544
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=0)
expected = Series([2., 3.], index=["A", "B"], name=0.5)
assert_series_equal(result, expected)
expected = df.quantile(.5, axis="index")
assert_series_equal(result, expected)
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
result = df.quantile(.5, axis="columns")
assert_series_equal(result, expected)
pytest.raises(ValueError, df.quantile, 0.1, axis=-1)
pytest.raises(ValueError, df.quantile, 0.1, axis="column")
def test_quantile_interpolation(self):
# see gh-10174
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
assert q['A'] == percentile(self.tsframe['A'], 10)
q = self.intframe.quantile(0.1)
assert q['A'] == percentile(self.intframe['A'], 10)
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
assert q1['A'] == np.percentile(self.intframe['A'], 10)
tm.assert_series_equal(q, q1)
# interpolation method other than default linear
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
# cross-check interpolation=nearest results in original dtype
exp = np.percentile(np.array([[1, 2, 3], [2, 3, 4]]), .5,
axis=0, interpolation='nearest')
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype='int64')
tm.assert_series_equal(result, expected)
# float
df = DataFrame({"A": [1., 2., 3.], "B": [2., 3., 4.]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1., 2., 3.], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
exp = np.percentile(np.array([[1., 2., 3.], [2., 3., 4.]]), .5,
axis=0, interpolation='nearest')
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype='float64')
assert_series_equal(result, expected)
# axis
result = df.quantile([.5, .75], axis=1, interpolation='lower')
expected = DataFrame({1: [1., 1.], 2: [2., 2.],
3: [3., 3.]}, index=[0.5, 0.75])
assert_frame_equal(result, expected)
# test degenerate case
df = DataFrame({'x': [], 'y': []})
q = df.quantile(0.1, axis=0, interpolation='higher')
assert(np.isnan(q['x']) and np.isnan(q['y']))
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5], interpolation='midpoint')
# https://github.com/numpy/numpy/issues/7163
if _np_version_under1p11:
expected = DataFrame([[1.5, 1.5, 1.5], [2.5, 2.5, 2.5]],
index=[.25, .5], columns=['a', 'b', 'c'])
else:
expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5])
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
# axis = 1
result = df.quantile([.25, .5], axis=1)
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=[0, 1, 2])
# empty
result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
index=[.1, .9])
assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
# exclude datetime
result = df.quantile(.5)
expected = Series([2.5], index=['b'])
# datetime
result = df.quantile(.5, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
index=['a', 'b'],
name=0.5)
assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([.5], numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
index=[.5], columns=['a', 'b'])
assert_frame_equal(result, expected)
# axis = 1
df['c'] = pd.to_datetime(['2011', '2012'])
result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')],
index=[0, 1],
name=0.5)
assert_series_equal(result, expected)
result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')]],
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
# empty when numeric_only=True
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# result = df[['a', 'c']].quantile(.5)
# result = df[['a', 'c']].quantile([.5])
def test_quantile_invalid(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assert_raises_regex(ValueError, msg):
self.tsframe.quantile(invalid)
def test_quantile_box(self):
df = DataFrame({'A': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
'B': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'C': [pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days')]})
res = df.quantile(0.5, numeric_only=False)
exp = pd.Series([pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days')],
name=0.5, index=['A', 'B', 'C'])
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = pd.DataFrame([[pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days')]],
index=[0.5], columns=['A', 'B', 'C'])
tm.assert_frame_equal(res, exp)
# DatetimeBlock may be consolidated and contain NaT in different loc
df = DataFrame({'A': [pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.NaT,
pd.Timestamp('2011-01-03')],
'B': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.NaT,
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.NaT,
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'C': [pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.NaT],
'c': [pd.NaT,
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days')]},
columns=list('AaBbCc'))
res = df.quantile(0.5, numeric_only=False)
exp = pd.Series([pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days'),
pd.Timedelta('2 days')],
name=0.5, index=list('AaBbCc'))
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = pd.DataFrame([[pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days'),
pd.Timedelta('2 days')]],
index=[0.5], columns=list('AaBbCc'))
tm.assert_frame_equal(res, exp)
def test_quantile_nan(self):
# GH 14357 - float block where some cols have missing values
df = DataFrame({'a': np.arange(1, 6.0), 'b': np.arange(1, 6.0)})
df.iloc[-1, 1] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, 2.5], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({'a': [3.0, 4.0], 'b': [2.5, 3.25]}, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
res = df.quantile(0.5, axis=1)
exp = Series(np.arange(1.0, 6.0), name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75], axis=1)
exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
# full-nan column
df['b'] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, np.nan], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({'a': [3.0, 4.0], 'b': [np.nan, np.nan]},
index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
def test_quantile_nat(self):
# full NaT column
df = DataFrame({'a': [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(0.5, numeric_only=False)
exp = Series([pd.NaT], index=['a'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame({'a': [pd.NaT]}, index=[0.5])
tm.assert_frame_equal(res, exp)
# mixed non-null / full null column
df = DataFrame({'a': [pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03')],
'b': [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(0.5, numeric_only=False)
exp = Series([pd.Timestamp('2012-01-02'), pd.NaT], index=['a', 'b'],
name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame([[pd.Timestamp('2012-01-02'), pd.NaT]], index=[0.5],
columns=['a', 'b'])
tm.assert_frame_equal(res, exp)
def test_quantile_empty(self):
# floats
df = DataFrame(columns=['a', 'b'], dtype='float64')
res = df.quantile(0.5)
exp = Series([np.nan, np.nan], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5])
exp = DataFrame([[np.nan, np.nan]], columns=['a', 'b'], index=[0.5])
tm.assert_frame_equal(res, exp)
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5, axis=1)
# res = df.quantile([0.5], axis=1)
# ints
df = DataFrame(columns=['a', 'b'], dtype='int64')
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5)
# datetimes
df = DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
# FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0)
# res = df.quantile(0.5, numeric_only=False)
| bsd-3-clause |
appapantula/scikit-learn | sklearn/lda.py | 72 | 17751 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.std_[:, np.newaxis] * s * sc.std_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
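        # the next four in-place operations evaluate the logistic sigmoid
        # 1 / (1 + exp(-decision)) without allocating intermediate arrays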
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
| bsd-3-clause |
mompiou/stereo-proj | stereo-proj-pyqt.py | 1 | 88031 | #!/usr/bin/python
######################################################################
#
#
# Stereo-Proj is a python utility to plot stereographic projetion of a given crystal. It is designed
# to be used in electron microscopy experiments.
# Author: F. Mompiou, CEMES-CNRS
#
#
#######################################################################
from __future__ import division
import numpy as np
from PyQt4 import QtGui, QtCore
import sys
import random
import os
from PIL import Image
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib import pyplot as plt
import matplotlib as mpl
import stereoprojUI
import intersectionsUI
import angleUI
import schmidUI
import xyzUI
import hkl_uvwUI
import widthUI
import kikuchiUI
################
# Misc
################
def unique_rows(a):
a = np.ascontiguousarray(a)
unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))
return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
def GCD(a, b, rtol = 1e-05, atol = 1e-08):
t = min(abs(a), abs(b))
while abs(b) > rtol * t + atol:
a, b = b, a % b
return a
###################################################################"
##### Projection
####################################################################
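# Stereographic projection of a unit vector (x, y, z) onto the equatorial plane:
# (X, Y) = (x/(1+z), y/(1+z)); vectors pointing into the lower hemisphere (z < 0)
# are rejected ('nan') so only poles in the upper hemisphere are drawn.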
def proj(x,y,z):
if z==1:
X=0
Y=0
elif z<-0.000001:
X='nan'
Y='nan'
else:
X=x/(1+z)
Y=y/(1+z)
return np.array([X,Y],float)
def proj2(x,y,z):
if z==1:
X=0
Y=0
elif z<-0.000001:
X=-x/(1-z)
Y=-y/(1-z)
else:
X=x/(1+z)
Y=y/(1+z)
return np.array([X,Y,z],float)
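# Gnomonic projection of the same unit vector from the sphere centre:
# (X, Y) = (x/z, y/z), which maps great circles to straight lines.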
def proj_gnomonic(x,y,z):
if z==0:
X=x
Y=y
else:
X=x/z
Y=y/z
return np.array([X,Y],float)
#def proj_ortho(x,y,z):
#
# return np.array([x,y],float)
###################################################################
# Rotation Euler
#
##################################################################
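# Rotation matrix built from the Euler angles (phi1, Phi, phi2) of a Z-X-Z
# composition (Bunge-style convention); angles are given in degrees and
# converted to radians below.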
def rotation(phi1,phi,phi2):
phi1=phi1*np.pi/180;
phi=phi*np.pi/180;
phi2=phi2*np.pi/180;
R=np.array([[np.cos(phi1)*np.cos(phi2)-np.cos(phi)*np.sin(phi1)*np.sin(phi2),
-np.cos(phi)*np.cos(phi2)*np.sin(phi1)-np.cos(phi1)*
np.sin(phi2),np.sin(phi)*np.sin(phi1)],[np.cos(phi2)*np.sin(phi1)
+np.cos(phi)*np.cos(phi1)*np.sin(phi2),np.cos(phi)*np.cos(phi1)
*np.cos(phi2)-np.sin(phi1)*np.sin(phi2), -np.cos(phi1)*np.sin(phi)],
[np.sin(phi)*np.sin(phi2), np.cos(phi2)*np.sin(phi), np.cos(phi)]],float)
return R
###################################################################
# Rotation around a given axis
#
##################################################################
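# Rodrigues rotation formula: R = cos(th)*I + (1-cos(th))*n*n^T + sin(th)*[n]_x
# for a rotation of th degrees about the unit axis n = (a,b,c)/|(a,b,c)|.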
def Rot(th,a,b,c):
th=th*np.pi/180
no=np.linalg.norm([a,b,c])
aa=a/no
bb=b/no
cc=c/no
c1=np.array([[1,0,0],[0,1,0],[0,0,1]],float)
c2=np.array([[aa**2,aa*bb,aa*cc],[bb*aa,bb**2,bb*cc],[cc*aa,
cc*bb,cc**2]],float)
c3=np.array([[0,-cc,bb],[cc,0,-aa],[-bb,aa,0]],float)
R=np.cos(th)*c1+(1-np.cos(th))*c2+np.sin(th)*c3
return R
#######################
#
# Layout functions
#
#######################
def color_trace():
color_trace=1
if ui.color_trace_bleu.isChecked():
color_trace=1
    if ui.color_trace_vert.isChecked():  # assumed green radio button; the original re-tested the blue one
color_trace=2
if ui.color_trace_rouge.isChecked():
color_trace=3
return color_trace
def var_uvw():
var_uvw=0
if ui.uvw_button.isChecked():
var_uvw=1
return var_uvw
def var_hexa():
var_hexa=0
if ui.hexa_button.isChecked():
var_hexa=1
return var_hexa
def var_carre():
var_carre=0
if ui.style_box.isChecked():
var_carre=1
return var_carre
####################################################################
#
# Crystal definition
#
##################################################################
def crist():
global axes,axesh,D,Dstar,V,G
abc=ui.abc_entry.text().split(",")
a=np.float(abc[0])*1e-10
b=np.float(abc[1])*1e-10
c=np.float(abc[2])*1e-10
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alpha=np.float(alphabetagamma[0])
beta=np.float(alphabetagamma[1])
gamma=np.float(alphabetagamma[2])
e=np.int(ui.e_entry.text())
d2=np.float(ui.d_label_var.text())
alpha=alpha*np.pi/180
beta=beta*np.pi/180
gamma=gamma*np.pi/180
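    # triclinic cell volume and the direct structure matrix D (crystal -> cartesian);
    # Dstar = (D^-1)^T spans the reciprocal lattice and G is the direct metric tensor,
    # so interplanar spacings follow from d(hkl) = 1/sqrt(h . G^-1 . h)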
V=a*b*c*np.sqrt(1-(np.cos(alpha)**2)-(np.cos(beta))**2-(np.cos(gamma))**2+2*np.cos(alpha)*np.cos(beta)*np.cos(gamma))
D=np.array([[a,b*np.cos(gamma),c*np.cos(beta)],[0,b*np.sin(gamma), c*(np.cos(alpha)-np.cos(beta)*np.cos(gamma))/np.sin(gamma)],[0,0,V/(a*b*np.sin(gamma))]])
Dstar=np.transpose(np.linalg.inv(D))
G=np.array([[a**2,a*b*np.cos(gamma),a*c*np.cos(beta)],[a*b*np.cos(gamma),b**2,b*c*np.cos(alpha)],[a*c*np.cos(beta),b*c*np.cos(alpha),c**2]])
axes=np.zeros(((2*e+1)**3-1,3))
axesh=np.zeros(((2*e+1)**3-1,7))
axesh[:,4]=color_trace()
id=0
for i in range(-e,e+1):
for j in range(-e,e+1):
for k in range(-e,e+1):
if (i,j,k)!=(0,0,0):
d=1/(np.sqrt(np.dot(np.array([i,j,k]),np.dot(np.linalg.inv(G),np.array([i,j,k])))))
if d>d2*0.1*np.amax([a,b,c]):
if var_uvw()==0:
Ma=np.dot(Dstar,np.array([i,j,k],float))
axesh[id,3]=0
else:
Ma=np.dot(D,np.array([i,j,k],float))
axesh[id,3]=1
m=np.abs(reduce(lambda x,y:GCD(x,y),[i,j,k]))
if (np.around(i/m)==i/m) & (np.around(j/m)==j/m) & (np.around(k/m)==k/m):
axes[id,:]=np.array([i,j,k])/m
else:
axes[id,:]=np.array([i,j,k])
axesh[id,0:3]=Ma/np.linalg.norm(Ma)
axesh[id,5]=1
axesh[id,6]=1
id=id+1
axesh=axesh[~np.all(axesh[:,0:3]==0, axis=1)]
axes=axes[~np.all(axes==0, axis=1)]
return axes,axesh,D,Dstar,V,G
###############################################
#
# Switch to reciprocal indices with size indicating the intensity
# Need as input a file with the atoms in the cells to get the structure factor
#
###############################################
def lattice_reciprocal():
if ui.reciprocal_checkBox.isChecked():
crist_reciprocal()
else:
undo_crist_reciprocal()
def crist_reciprocal():
global axes,axesh, naxes
for z in range(0, np.shape(axes)[0]):
if z<(np.shape(axes)[0]-naxes):
I,h,k,l=extinction(ui.space_group_Box.currentText(),axes[z,0],axes[z,1],axes[z,2],np.int(ui.e_entry.text()),0)
else:
I,h,k,l=extinction(ui.space_group_Box.currentText(),axes[z,0],axes[z,1],axes[z,2],10000,0)
if I>0:
if var_uvw()==0:
axesh[z,3]=0
else:
axesh[z,3]=1
axesh[z,5]=I
axesh[z,6]=1
axes[z,:]=np.array([h,k,l])
else:
axesh[z,0:3]=np.array([0,0,0])
if var_uvw()==0:
axesh[z,3]=0
else:
axesh[z,3]=1
axesh[z,5]=1
axesh[z,6]=1
axes[z,:]=np.array([0,0,0])
axesh=axesh[~np.all(axesh[:,0:3]==0, axis=1)]
axes=axes[~np.all(axes==0, axis=1)]
return axes,axesh,naxes
def undo_crist_reciprocal():
global axes,axesh,naxes,dmip
if naxes!=0:
extra_axes=axes[-naxes:,:]
extra_axesh=axesh[-naxes:,:]
for i in range(0,np.shape(extra_axes)[0]):
m=reduce(lambda x,y:GCD(x,y),extra_axes[i,:])
extra_axes[i,:]=extra_axes[i,:]/m
if var_uvw()==0:
extra_axesh[i,3]=0
else:
extra_axesh[i,3]=1
extra_axesh[i,5]=1
extra_axesh[i,6]=1
crist()
axes=np.vstack((axes,extra_axes))
axesh=np.vstack((axesh,extra_axesh))
else:
crist()
return axes, axesh,naxes
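# Structure factor test for systematic extinctions:
# F(hkl) = sum_j f_j * exp(2*pi*i*(h*x_j + k*y_j + l*z_j)) over the atom positions
# listed for the chosen space group, with scattering factors f_j from a four-Gaussian
# fit (x_scatt). I = |F|^2; when diff=0 and I is zero the indices are doubled until an
# allowed reflection below the 'lim' cutoff is found.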
def extinction(space_group,h,k,l,lim,diff):
global x_space,G,x_scatt
h0=h
k0=k
l0=l
for i in range(0,len(x_space)):
if space_group==x_space[i][0]:
s0=i
while np.amax([np.abs(h0),np.abs(k0),np.abs(l0)])<=lim:
F=0
s=s0
while (s<(len(x_space)-1) and (len(x_space[s+1])==4)):
q=2*np.pi*np.sqrt(np.dot(np.array([h0,k0,l0]),np.dot(np.linalg.inv(G),np.array([h0,k0,l0]))))*1e-10
f=str(x_space[s+1][0])
for z in range(0,len(x_scatt)):
if f==x_scatt[z][0]:
f=eval(x_scatt[z][1])*np.exp(-eval(x_scatt[z][2])*(q/4/np.pi)**2)+eval(x_scatt[z][3])*np.exp(-eval(x_scatt[z][4])*(q/4/np.pi)**2)+eval(x_scatt[z][5])*np.exp(-eval(x_scatt[z][6])*(q/4/np.pi)**2)+eval(x_scatt[z][7])*np.exp(-eval(x_scatt[z][8])*(q/4/np.pi)**2)+eval(x_scatt[z][9])
F=F+f*np.exp(2j*np.pi*(eval(x_space[s+1][1])*h0+eval(x_space[s+1][2])*k0+eval(x_space[s+1][3])*l0))
s=s+1
I=np.around(float(np.real(F*np.conj(F))),decimals=2)
if diff==0:
if I>0:
break
else:
h0=2*h0
k0=2*k0
l0=2*l0
else:
break
return I,h0,k0,l0
######################################################
#
# Restrict the number of displayed poles/directions according to a minimum d-spacing (adjusted with the plus/minus buttons)
#
#######################################################
def dist_restrict():
global G,axes,axesh
abc=ui.abc_entry.text().split(",")
a=np.float(abc[0])*1e-10
b=np.float(abc[1])*1e-10
c=np.float(abc[2])*1e-10
d2=np.float(ui.d_label_var.text())
for i in range(0,np.shape(axes)[0]):
d=1/(np.sqrt(np.dot(axes[i,:],np.dot(np.linalg.inv(G),axes[i,:]))))
if d<d2*0.1*np.amax([a,b,c]):
axesh[i,6]=0
else:
axesh[i,6]=1
trace()
def dm():
global dmip
dmip=dmip-np.float(ui.d_entry.text())
ui.d_label_var.setText(str(dmip))
dist_restrict()
return dmip
def dp():
global dmip, a,axes,axesh
dmip=dmip+np.float(ui.d_entry.text())
ui.d_label_var.setText(str(dmip))
dist_restrict()
return dmip
####################################################################
#
# Plot iso-Schmid factor curves, i.e. for a given plane the locus of directions b with a given
# Schmid factor (the Oy direction is assumed to be the straining axis)
#
####################################################################
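# Note (added, illustrative): the quantity contoured here is the Schmid factor
#     m = cos(phi) * cos(lambda),
# with phi the angle between the straining axis and the plane normal and lambda
# the angle between the straining axis and the slip direction. fact() below maps
# each stereogram point (x, y) back onto the unit sphere, where the cosine of its
# angle with the (work-space rotated) Oy straining axis is 2*y/(1+x**2+y**2), and
# multiplies it by the fixed cos(phi) of the chosen plane; see schmid_calc()
# further down for the single-system value.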
def schmid_trace():
global M,axes,axesh,T,V,D,Dstar,trP,tr_schmid,a,minx,maxx,miny,maxy,trC
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
tr_schmid=np.vstack((tr_schmid,np.array([pole1,pole2,pole3])))
trace()
def undo_schmid_trace():
global M,axes,axesh,T,V,D,Dstar,trP,tr_schmid,a,minx,maxx,miny,maxy,trC
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
tr_s=tr_schmid
for i in range(1,tr_schmid.shape[0]):
if tr_schmid[i,0]==pole1 and tr_schmid[i,1]==pole2 and tr_schmid[i,2]==pole3:
tr_s=np.delete(tr_schmid,i,0)
tr_schmid=tr_s
trace()
def fact(angle,r,t,n):
t_ang=ang_work_space()
x=r*np.cos(t)/n
y=r*np.sin(t)/n
C=np.dot(Rot(t_ang,0,0,1),np.array([x,y,0]))
x=C[0]
y=C[1]
f=np.cos(angle)*2*y/((1+x**2+y**2))
return f
def schmid_trace2(C):
global D, Dstar,M,a
for h in range(1,tr_schmid.shape[0]):
b1=C[h,0]
b2=C[h,1]
b3=C[h,2]
b=np.array([b1,b2,b3])
if var_uvw()==0:
bpr=np.dot(Dstar,b)/np.linalg.norm(np.dot(Dstar,b))
else:
bpr=np.dot(Dstar,b)/np.linalg.norm(np.dot(Dstar,b))
bpr2=np.dot(M,bpr)
t_ang=-ang_work_space()
T=np.dot(Rot(t_ang,0,0,1),np.array([0,1,0]))
angleb=np.arccos(np.dot(bpr2,T)/np.linalg.norm(bpr2))
n=300
r=np.linspace(0,n,100)
t=np.linspace(0,2*np.pi,100)
r,t=np.meshgrid(r,t)
F=fact(angleb,r,t,n)
lev=[-0.5,-0.4,-0.3,-0.2,0.2,0.3,0.4,0.5]
CS=a.contour(r*np.cos(t)+300, r*np.sin(t)+300, F,lev,linewidths=2)
fmt = {}
strs = [ '('+str(np.int(b1))+str(np.int(b2))+str(np.int(b3))+') -0.5','('+str(np.int(b1))+str(np.int(b2))+str(np.int(b3))+') -0.4','('+str(np.int(b1))+str(np.int(b2))+str(np.int(b3))+') -0.3','('+str(np.int(b1))+str(np.int(b2))+str(np.int(b3))+') -0.2','('+str(np.int(b1))+str(np.int(b2))+str(np.int(b3))+') 0.2','('+str(np.int(b1))+str(np.int(b2))+str(np.int(b3))+') 0.3','('+str(np.int(b1))+str(np.int(b2))+str(np.int(b3))+') 0.4','('+str(np.int(b1))+str(np.int(b2))+str(np.int(b3))+') 0.5']
for l,s in zip( CS.levels, strs ):
fmt[l] = s
a.clabel(CS,fmt=fmt,fontsize=10,inline=True)
###########################################################################
#
# Rotation of the sample. If Lock Axes is off, the rotations are about the y, x and z directions.
# If the check box is ticked, the y and z axes of the sample are locked to the crystal axes.
# This mimics either a double-tilt holder (alpha rotation about the fixed x axis, beta rotation about
# the moving beta-tilt axis) or a tilt-rotation holder (alpha rotation about the fixed x axis,
# z rotation about the moving z-rotation axis).
#
##########################################################################
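# Illustrative note (added): each tilt below left-multiplies the orientation
# matrix, M_new = Rot(theta, ux, uy, uz) . M; when Lock Axes is on, the rotation
# axis is first carried into the current frame via A = M . (M_lock^-1 . axis).
# The sketch below is a standalone Rodrigues-formula axis-angle matrix (angle in
# degrees, arbitrary axis), stated as an assumption about the convention of the
# module's Rot() helper rather than a copy of it; the name is hypothetical:
def _axis_angle_matrix_sketch(theta_deg, ux, uy, uz):
    import numpy as np
    u = np.array([ux, uy, uz], dtype=float)
    u = u/np.linalg.norm(u)
    th = np.radians(theta_deg)
    K = np.array([[0, -u[2], u[1]],
                  [u[2], 0, -u[0]],
                  [-u[1], u[0], 0]])  # cross-product (skew) matrix of the axis
    return np.eye(3) + np.sin(th)*K + (1 - np.cos(th))*np.dot(K, K)  # Rodrigues formula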
def euler_label():
global M
if np.abs(M[2,2]-1)<0.0001:
phir=0
phi1r=0
phi2r=np.arctan2(M[1,0],M[0,0])*180/np.pi
else:
phir=np.arccos(M[2,2])*180/np.pi
phi2r=np.arctan2(M[2,0],M[2,1])*180/np.pi
phi1r=np.arctan2(M[0,2],-M[1,2])*180/np.pi
t=str(np.around(phi1r,decimals=1))+str(',')+str(np.around(phir,decimals=1))+str(',')+str(np.around(phi2r,decimals=1))
ui.angle_euler_label.setText(t)
def lock():
global M, var_lock,M_lock
if ui.lock_checkButton.isChecked():
var_lock=1
M_lock=M
else:
var_lock,M_lock=0,0
return var_lock,M_lock
def rot_alpha_p():
global angle_alpha,M,a,trP,trC,s_a
tha=s_a*np.float(ui.angle_alpha_entry.text())
t_ang=-ang_work_space()
t_a_y=np.dot(Rot(t_ang,0,0,1),np.array([0,1,0]))
M=np.dot(Rot(tha,t_a_y[0],t_a_y[1],t_a_y[2]),M)
trace()
euler_label()
angle_alpha=angle_alpha+np.float(ui.angle_alpha_entry.text())
ui.angle_alpha_label_2.setText(str(angle_alpha))
return angle_alpha,M
def rot_alpha_m():
global angle_alpha,M,a,trP,trC,s_a
tha=-s_a*np.float(ui.angle_alpha_entry.text())
t_ang=-ang_work_space()
t_a_y=np.dot(Rot(t_ang,0,0,1),np.array([0,1,0]))
M=np.dot(Rot(tha,t_a_y[0],t_a_y[1],t_a_y[2]),M)
trace()
euler_label()
angle_alpha=angle_alpha-np.float(ui.angle_alpha_entry.text())
ui.angle_alpha_label_2.setText(str(angle_alpha))
return angle_alpha,M
def rot_beta_m():
global angle_beta,M,angle_alpha, angle_z, var_lock, M_lock,s_b
t_ang=-ang_work_space()
t_a_x=np.dot(Rot(t_ang,0,0,1),np.array([1,0,0]))
if var_lock==0:
AxeY=t_a_x
else:
A=np.dot(np.linalg.inv(M_lock),t_a_x)
AxeY=np.dot(M,A)
thb=-s_b*np.float(ui.angle_beta_entry.text())
M=np.dot(Rot(thb,AxeY[0],AxeY[1],AxeY[2]),M)
trace()
euler_label()
angle_beta=angle_beta-np.float(ui.angle_beta_entry.text())
ui.angle_beta_label_2.setText(str(angle_beta))
return angle_beta,M
def rot_beta_p():
global angle_beta,M,angle_alpha, angle_z, var_lock, M_lock,s_b
t_ang=-ang_work_space()
t_a_x=np.dot(Rot(t_ang,0,0,1),np.array([1,0,0]))
if var_lock==0:
AxeY=t_a_x
else:
A=np.dot(np.linalg.inv(M_lock),t_a_x)
AxeY=np.dot(M,A)
thb=s_b*np.float(ui.angle_beta_entry.text())
M=np.dot(Rot(thb,AxeY[0],AxeY[1],AxeY[2]),M)
trace()
euler_label()
angle_beta=angle_beta+np.float(ui.angle_beta_entry.text())
ui.angle_beta_label_2.setText(str(angle_beta))
return angle_beta,M
def rot_z_m():
global angle_beta,M,angle_alpha, angle_z, var_lock, M_lock,s_z
if var_lock==0:
AxeZ=np.array([0,0,1])
else:
A=np.dot(np.linalg.inv(M_lock),np.array([0,0,1]))
AxeZ=np.dot(M,A)
thz=-s_z*np.float(ui.angle_z_entry.text())
M=np.dot(Rot(thz,AxeZ[0],AxeZ[1],AxeZ[2]),M)
trace()
euler_label()
angle_z=angle_z-np.float(ui.angle_z_entry.text())
ui.angle_z_label_2.setText(str(angle_z))
return angle_z,M
def rot_z_p():
global angle_beta,M,angle_alpha, angle_z, var_lock, M_lock,s_z
if var_lock==0:
AxeZ=np.array([0,0,1])
else:
A=np.dot(np.linalg.inv(M_lock),np.array([0,0,1]))
AxeZ=np.dot(M,A)
thz=s_z*np.float(ui.angle_z_entry.text())
M=np.dot(Rot(thz,AxeZ[0],AxeZ[1],AxeZ[2]),M)
trace()
euler_label()
angle_z=angle_z+np.float(ui.angle_z_entry.text())
ui.angle_z_label_2.setText(str(angle_z))
return angle_z,M
####################################################################
#
# Rotate around a given pole
#
####################################################################
def rotgm():
global g,M,Dstar,a
thg=-np.float(ui.rot_g_entry.text())
diff=ui.diff_entry.text().split(",")
diff1=np.float(diff[0])
diff2=np.float(diff[1])
diff3=np.float(diff[2])
A=np.array([diff1,diff2,diff3])
Ad=np.dot(Dstar,A)
Ap=np.dot(M,Ad)/np.linalg.norm(np.dot(M,Ad))
M=np.dot(Rot(thg,Ap[0],Ap[1],Ap[2]),M)
trace()
euler_label()
g=g-np.float(ui.rot_g_entry.text())
ui.rg_label.setText(str(g))
return g,M
def rotgp():
global g,M,D
thg=np.float(ui.rot_g_entry.text())
diff=ui.diff_entry.text().split(",")
diff1=np.float(diff[0])
diff2=np.float(diff[1])
diff3=np.float(diff[2])
A=np.array([diff1,diff2,diff3])
Ad=np.dot(Dstar,A)
Ap=np.dot(M,Ad)/np.linalg.norm(np.dot(M,Ad))
M=np.dot(Rot(thg,Ap[0],Ap[1],Ap[2]),M)
trace()
euler_label()
g=g+np.float(ui.rot_g_entry.text())
ui.rg_label.setText(str(g))
return g,M
####################################################################
#
# Add a given pole and equivalent ones
#
####################################################################
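# Note (added, illustrative): pole() converts the entered indices to a Cartesian
# unit vector before storing and projecting it: a plane (hkl) goes through Dstar,
# a direction [uvw] through D, i.e. Gsh = Dstar.(h,k,l)/|Dstar.(h,k,l)| or
# Gsh = D.(u,v,w)/|D.(u,v,w)|, and the vector is flipped so that it lies in the
# upper hemisphere before projection. In the hexagonal setting the first two
# direction indices are first rescaled (u -> 2u+v, v -> 2v+u) so that four-index
# directions plot correctly.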
def pole(pole1,pole2,pole3):
global M,axes,axesh,T,V,D,Dstar,naxes
if var_hexa()==1:
if var_uvw()==1:
pole1a=2*pole1+pole2
pole2a=2*pole2+pole1
pole1=pole1a
pole2=pole2a
Gs=np.array([pole1,pole2,pole3],float)
if var_uvw()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(M,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1=-pole1
pole2=-pole2
pole3=-pole3
T=np.vstack((T,np.array([S[0],S[1],S[2]])))
if ui.reciprocal_checkBox.isChecked():
I,h,k,l=extinction(ui.space_group_Box.currentText(),pole1,pole2,pole3,100000,0)
if I>0:
axes=np.vstack((axes,np.array([h,k,l])))
axes=np.vstack((axes,np.array([-h,-k,-l])))
if var_uvw()==0 :
axesh=np.vstack((axesh,np.array([Gsh[0],Gsh[1],Gsh[2],0,color_trace(),I,1])))
axesh=np.vstack((axesh,np.array([-Gsh[0],-Gsh[1],-Gsh[2],0,color_trace(),I,1])))
else:
axesh=np.vstack((axesh,np.array([Gsh[0],Gsh[1],Gsh[2],1,color_trace(),I,1])))
axesh=np.vstack((axesh,np.array([-Gsh[0],-Gsh[1],-Gsh[2],1,color_trace(),I,1])))
else:
axes=np.vstack((axes,np.array([pole1,pole2,pole3])))
axes=np.vstack((axes,np.array([-pole1,-pole2,-pole3])))
if var_uvw()==0 :
axesh=np.vstack((axesh,np.array([Gsh[0],Gsh[1],Gsh[2],0,color_trace(),0,1])))
axesh=np.vstack((axesh,np.array([-Gsh[0],-Gsh[1],-Gsh[2],0,color_trace(),0,1])))
else:
axesh=np.vstack((axesh,np.array([Gsh[0],Gsh[1],Gsh[2],1,color_trace(),0,1])))
axesh=np.vstack((axesh,np.array([-Gsh[0],-Gsh[1],-Gsh[2],1,color_trace(),0,1])))
naxes=naxes+2
return axes,axesh,T,naxes
def undo_pole(pole1,pole2,pole3):
global M,axes,axesh,T,V,D,Dstar,naxes
if var_hexa()==1:
if var_uvw()==1:
pole1a=2*pole1+pole2
pole2a=2*pole2+pole1
pole1=pole1a
pole2=pole2a
Gs=np.array([pole1,pole2,pole3],float)
if var_uvw()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(M,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1=-pole1
pole2=-pole2
pole3=-pole3
if ui.reciprocal_checkBox.isChecked():
I,h,k,l=extinction(ui.space_group_Box.currentText(),pole1,pole2,pole3,100000,0)
if I>0:
pole1=h
pole2=k
pole3=l
ind=np.where((axes[:,0]==pole1) & (axes[:,1]==pole2)& (axes[:,2]==pole3)|(axes[:,0]==-pole1) & (axes[:,1]==-pole2)& (axes[:,2]==-pole3))
else:
m=reduce(lambda x,y:GCD(x,y),[pole1,pole2,pole3])
ind=np.where((axes[:,0]==pole1) & (axes[:,1]==pole2)& (axes[:,2]==pole3)|(axes[:,0]==-pole1) & (axes[:,1]==-pole2)& (axes[:,2]==-pole3))
axes=np.delete(axes,ind,0)
T=np.delete(T,ind,0)
axesh=np.delete(axesh,ind,0)
naxes=naxes-2
return axes,axesh,T,naxes
def d(pole1,pole2,pole3):
global M,axes,axesh,T,V,D,Dstar
abc=ui.abc_entry.text().split(",")
a=np.float(abc[0])
b=np.float(abc[1])
c=np.float(abc[2])
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alpha=np.float(alphabetagamma[0])*np.pi/180
beta=np.float(alphabetagamma[1])*np.pi/180
gamma=np.float(alphabetagamma[2])*np.pi/180
G=np.array([[a**2,a*b*np.cos(gamma),a*c*np.cos(beta)],[a*b*np.cos(gamma),b**2,b*c*np.cos(alpha)],[a*c*np.cos(beta),b*c*np.cos(alpha),c**2]])
ds=(np.sqrt(np.dot(np.array([pole1,pole2,pole3]),np.dot(np.linalg.inv(G),np.array([pole1,pole2,pole3])))))
return ds
def addpole_sym():
global M,axes,axesh,T,V,D,Dstar,G
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
abc=ui.abc_entry.text().split(",")
a=np.float(abc[0])
b=np.float(abc[1])
c=np.float(abc[2])
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alpha=np.float(alphabetagamma[0])*np.pi/180
beta=np.float(alphabetagamma[1])*np.pi/180
gamma=np.float(alphabetagamma[2])*np.pi/180
G=np.array([[a**2,a*b*np.cos(gamma),a*c*np.cos(beta)],[a*b*np.cos(gamma),b**2,b*c*np.cos(alpha)],[a*c*np.cos(beta),b*c*np.cos(alpha),c**2]])
v=d(pole1,pole2,pole3)
pole(pole1,pole2,pole3)
if np.abs(alpha-np.pi/2)<0.001 and np.abs(beta-np.pi/2)<0.001 and np.abs(gamma-2*np.pi/3)<0.001:
pole(pole1,pole2,pole3)
pole(pole1,pole2,-pole3)
pole(pole2,pole1,pole3)
pole(pole2,pole1,-pole3)
pole(-pole1-pole2,pole2,pole3)
pole(-pole1-pole2,pole2,-pole3)
pole(pole1,-pole1-pole2,pole3)
pole(pole1,-pole1-pole2,-pole3)
pole(pole2,-pole1-pole2,pole3)
pole(pole2,-pole1-pole2,-pole3)
pole(-pole1-pole2,pole1,pole3)
pole(-pole1-pole2,pole1,-pole3)
else:
if np.abs(d(pole1,pole2,-pole3)-v)<0.001:
pole(pole1,pole2,-pole3)
if np.abs(d(pole1,-pole2,pole3)-v)<0.001:
pole(pole1,-pole2,pole3)
if np.abs(d(-pole1,pole2,pole3)-v)<0.001:
pole(-pole1,pole2,pole3)
if np.abs(d(pole2,pole1,pole3)-v)<0.001:
pole(pole2,pole1,pole3)
if np.abs(d(pole2,pole1,-pole3)-v)<0.001:
pole(pole2,pole1,-pole3)
if np.abs(d(pole2,-pole1,pole3)-v)<0.001:
pole(pole2,-pole1,pole3)
if np.abs(d(-pole2,pole1,pole3)-v)<0.001:
pole(-pole2,pole1,pole3)
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
pole(pole2,pole3,pole1)
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
pole(pole2,pole3,-pole1)
if np.abs(d(pole2,-pole3,pole1)-v)<0.001:
pole(pole2,-pole3,pole1)
if np.abs(d(-pole2,pole3,pole1)-v)<0.001:
pole(-pole2,pole3,pole1)
if np.abs(d(pole1,pole3,pole2)-v)<0.001:
pole(pole1,pole3,pole2)
if np.abs(d(pole1,pole3,-pole2)-v)<0.001:
pole(pole1,pole3,-pole2)
if np.abs(d(pole1,-pole3,pole2)-v)<0.001:
pole(pole1,-pole3,pole2)
if np.abs(d(-pole1,pole3,pole2)-v)<0.001:
pole(-pole1,pole3,pole2)
if np.abs(d(pole3,pole1,pole2)-v)<0.001:
pole(pole3,pole1,pole2)
if np.abs(d(pole3,pole1,-pole2)-v)<0.001:
pole(pole3,pole1,-pole2)
if np.abs(d(pole3,-pole1,pole2)-v)<0.001:
pole(pole3,-pole1,pole2)
if np.abs(d(-pole3,pole1,pole2)-v)<0.001:
pole(-pole3,pole1,pole2)
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
pole(pole3,pole2,pole1)
if np.abs(d(pole3,pole2,-pole1)-v)<0.001:
pole(pole3,pole2,-pole1)
if np.abs(d(pole3,-pole2,pole1)-v)<0.001:
pole(pole3,-pole2,pole1)
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
pole(pole3,pole2,pole1)
trace()
def undo_sym():
global M,axes,axesh,T,V,D,Dstar,G
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
abc=ui.abc_entry.text().split(",")
a=np.float(abc[0])
b=np.float(abc[1])
c=np.float(abc[2])
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alpha=np.float(alphabetagamma[0])*np.pi/180
beta=np.float(alphabetagamma[1])*np.pi/180
gamma=np.float(alphabetagamma[2])*np.pi/180
G=np.array([[a**2,a*b*np.cos(gamma),a*c*np.cos(beta)],[a*b*np.cos(gamma),b**2,b*c*np.cos(alpha)],[a*c*np.cos(beta),b*c*np.cos(alpha),c**2]])
v=d(pole1,pole2,pole3)
undo_pole(pole1,pole2,pole3)
if np.abs(alpha-np.pi/2)<0.001 and np.abs(beta-np.pi/2)<0.001 and np.abs(gamma-2*np.pi/3)<0.001:
undo_pole(pole1,pole2,pole3)
undo_pole(pole1,pole2,-pole3)
undo_pole(pole2,pole1,pole3)
undo_pole(pole2,pole1,-pole3)
undo_pole(-pole1-pole2,pole2,pole3)
undo_pole(-pole1-pole2,pole2,-pole3)
undo_pole(pole1,-pole1-pole2,pole3)
undo_pole(pole1,-pole1-pole2,-pole3)
undo_pole(pole2,-pole1-pole2,pole3)
undo_pole(pole2,-pole1-pole2,-pole3)
undo_pole(-pole1-pole2,pole1,pole3)
undo_pole(-pole1-pole2,pole1,-pole3)
else:
if np.abs(d(pole1,pole2,-pole3)-v)<0.001:
undo_pole(pole1,pole2,-pole3)
if np.abs(d(pole1,-pole2,pole3)-v)<0.001:
undo_pole(pole1,-pole2,pole3)
if np.abs(d(-pole1,pole2,pole3)-v)<0.001:
undo_pole(-pole1,pole2,pole3)
if np.abs(d(pole2,pole1,pole3)-v)<0.001:
undo_pole(pole2,pole1,pole3)
if np.abs(d(pole2,pole1,-pole3)-v)<0.001:
undo_pole(pole2,pole1,-pole3)
if np.abs(d(pole2,-pole1,pole3)-v)<0.001:
undo_pole(pole2,-pole1,pole3)
if np.abs(d(-pole2,pole1,pole3)-v)<0.001:
undo_pole(-pole2,pole1,pole3)
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
undo_pole(pole2,pole3,pole1)
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
undo_pole(pole2,pole3,-pole1)
if np.abs(d(pole2,-pole3,pole1)-v)<0.001:
undo_pole(pole2,-pole3,pole1)
if np.abs(d(-pole2,pole3,pole1)-v)<0.001:
undo_pole(-pole2,pole3,pole1)
if np.abs(d(pole1,pole3,pole2)-v)<0.001:
undo_pole(pole1,pole3,pole2)
if np.abs(d(pole1,pole3,-pole2)-v)<0.001:
undo_pole(pole1,pole3,-pole2)
if np.abs(d(pole1,-pole3,pole2)-v)<0.001:
undo_pole(pole1,-pole3,pole2)
if np.abs(d(-pole1,pole3,pole2)-v)<0.001:
undo_pole(-pole1,pole3,pole2)
if np.abs(d(pole3,pole1,pole2)-v)<0.001:
undo_pole(pole3,pole1,pole2)
if np.abs(d(pole3,pole1,-pole2)-v)<0.001:
undo_pole(pole3,pole1,-pole2)
if np.abs(d(pole3,-pole1,pole2)-v)<0.001:
undo_pole(pole3,-pole1,pole2)
if np.abs(d(-pole3,pole1,pole2)-v)<0.001:
undo_pole(-pole3,pole1,pole2)
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
undo_pole(pole3,pole2,pole1)
if np.abs(d(pole3,pole2,-pole1)-v)<0.001:
undo_pole(pole3,pole2,-pole1)
if np.abs(d(pole3,-pole2,pole1)-v)<0.001:
undo_pole(pole3,-pole2,pole1)
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
undo_pole(pole3,pole2,pole1)
trace()
def addpole():
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
pole(pole1,pole2,pole3)
trace()
def undo_addpole():
global M,axes,axesh,T,V,D,Dstar,trP,tr_schmid,nn,trC
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
undo_pole(pole1,pole2,pole3)
trace()
####################################################################
#
# Plot the trace of a given plane (and of its symmetry-equivalent ones). Plot a cone of given inclination.
#
####################################################################
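# Note (added, illustrative): the trace of a plane is the great circle normal to
# its (rotated) unit normal S. trace_plan2() below generates it by rotating the
# in-plane vector (sin g, cos g, 0), g in [-pi, pi], by the polar angle of S
# about Oy and then by its azimuth about Oz, and stereographically projecting
# every point; trace_cone2() builds a cone about S the same way, with the
# generator tilted by the requested inclination instead of lying at 90 degrees.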
def trace_plan(pole1,pole2,pole3):
global M,axes,axesh,T,V,D,Dstar,trP,trC
pole_i=0
pole_c=color_trace()
if var_hexa()==1:
if var_uvw()==1:
pole1=2*pole1+pole2
pole2=2*pole2+pole1
pole_i=1
trP=np.vstack((trP,np.array([pole1,pole2,pole3,pole_i,pole_c])))
b=np.ascontiguousarray(trP).view(np.dtype((np.void, trP.dtype.itemsize * trP.shape[1])))
trP=np.unique(b).view(trP.dtype).reshape(-1, trP.shape[1])
def trace_cone(pole1,pole2,pole3):
global M,axes,axesh,T,V,D,Dstar,trC
pole_i=0
pole_c=color_trace()
inc=np.float(ui.inclination_entry.text())
if var_hexa()==1:
if var_uvw()==1:
pole1=2*pole1+pole2
pole2=2*pole2+pole1
pole_i=1
trC=np.vstack((trC,np.array([pole1,pole2,pole3,pole_i,pole_c,inc])))
b=np.ascontiguousarray(trC).view(np.dtype((np.void, trC.dtype.itemsize * trC.shape[1])))
trC=np.unique(b).view(trC.dtype).reshape(-1, trC.shape[1])
def trace_addplan():
global M,axes,axesh,T,V,D,Dstar,trP
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
trace_plan(pole1,pole2,pole3)
trace()
def trace_addcone():
global M,axes,axesh,T,V,D,Dstar,trC
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
trace_cone(pole1,pole2,pole3)
trace()
def undo_trace_addplan():
global M,axes,axesh,T,V,D,Dstar,trP
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
undo_trace_plan(pole1,pole2,pole3)
trace()
def undo_trace_addcone():
global M,axes,axesh,T,V,D,Dstar,trC
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
undo_trace_cone(pole1,pole2,pole3)
trace()
def undo_trace_plan(pole1,pole2,pole3):
global M,axes,axesh,T,V,D,Dstar,trP,tr_schmid
ind=np.where((trP[:,0]==pole1) & (trP[:,1]==pole2)& (trP[:,2]==pole3)|(trP[:,0]==-pole1) & (trP[:,1]==-pole2)& (trP[:,2]==-pole3))
trP=np.delete(trP,ind,0)
b=np.ascontiguousarray(trP).view(np.dtype((np.void, trP.dtype.itemsize * trP.shape[1])))
trP=np.unique(b).view(trP.dtype).reshape(-1, trP.shape[1])
def undo_trace_cone(pole1,pole2,pole3):
global M,axes,axesh,T,V,D,Dstar,trC,tr_schmid
ind=np.where((trC[:,0]==pole1) & (trC[:,1]==pole2)& (trC[:,2]==pole3)|(trC[:,0]==-pole1) & (trC[:,1]==-pole2)& (trC[:,2]==-pole3))
trC=np.delete(trC,ind,0)
b=np.ascontiguousarray(trC).view(np.dtype((np.void, trC.dtype.itemsize * trC.shape[1])))
trC=np.unique(b).view(trC.dtype).reshape(-1, trC.shape[1])
def trace_plan_sym():
global M,axes,axesh,T,V,D,Dstar,G
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
abc=ui.abc_entry.text().split(",")
a=np.float(abc[0])*1e-10
b=np.float(abc[1])*1e-10
c=np.float(abc[2])*1e-10
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alpha=np.float(alphabetagamma[0])*np.pi/180
beta=np.float(alphabetagamma[1])*np.pi/180
gamma=np.float(alphabetagamma[2])*np.pi/180
G=np.array([[a**2,a*b*np.cos(gamma),a*c*np.cos(beta)],[a*b*np.cos(gamma),b**2,b*c*np.cos(alpha)],[a*c*np.cos(beta),b*c*np.cos(alpha),c**2]])
v=d(pole1,pole2,pole3)
trace_plan(pole1,pole2,pole3)
if np.abs(alpha-np.pi/2)<0.001 and np.abs(beta-np.pi/2)<0.001 and np.abs(gamma-2*np.pi/3)<0.001:
trace_plan(pole1,pole2,pole3)
trace_plan(pole1,pole2,-pole3)
trace_plan(pole2,pole1,pole3)
trace_plan(pole2,pole1,-pole3)
trace_plan(-pole1-pole2,pole2,pole3)
trace_plan(-pole1-pole2,pole2,-pole3)
trace_plan(pole1,-pole1-pole2,pole3)
trace_plan(pole1,-pole1-pole2,-pole3)
trace_plan(pole2,-pole1-pole2,pole3)
trace_plan(pole2,-pole1-pole2,-pole3)
trace_plan(-pole1-pole2,pole1,pole3)
trace_plan(-pole1-pole2,pole1,-pole3)
else:
if np.abs(d(pole1,pole2,-pole3)-v)<0.001:
trace_plan(pole1,pole2,-pole3)
if np.abs(d(pole1,-pole2,pole3)-v)<0.001:
trace_plan(pole1,-pole2,pole3)
if np.abs(d(-pole1,pole2,pole3)-v)<0.001:
trace_plan(-pole1,pole2,pole3)
if np.abs(d(pole2,pole1,pole3)-v)<0.001:
trace_plan(pole2,pole1,pole3)
if np.abs(d(pole2,pole1,-pole3)-v)<0.001:
trace_plan(pole2,pole1,-pole3)
if np.abs(d(pole2,-pole1,pole3)-v)<0.001:
trace_plan(pole2,-pole1,pole3)
if np.abs(d(-pole2,pole1,pole3)-v)<0.001:
trace_plan(-pole2,pole1,pole3)
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
trace_plan(pole2,pole3,pole1)
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
trace_plan(pole2,pole3,-pole1)
if np.abs(d(pole2,-pole3,pole1)-v)<0.001:
trace_plan(pole2,-pole3,pole1)
if np.abs(d(-pole2,pole3,pole1)-v)<0.001:
trace_plan(-pole2,pole3,pole1)
if np.abs(d(pole1,pole3,pole2)-v)<0.001:
trace_plan(pole1,pole3,pole2)
if np.abs(d(pole1,pole3,-pole2)-v)<0.001:
trace_plan(pole1,pole3,-pole2)
if np.abs(d(pole1,-pole3,pole2)-v)<0.001:
trace_plan(pole1,-pole3,pole2)
if np.abs(d(-pole1,pole3,pole2)-v)<0.001:
trace_plan(-pole1,pole3,pole2)
if np.abs(d(pole3,pole1,pole2)-v)<0.001:
trace_plan(pole3,pole1,pole2)
if np.abs(d(pole3,pole1,-pole2)-v)<0.001:
trace_plan(pole3,pole1,-pole2)
if np.abs(d(pole3,-pole1,pole2)-v)<0.001:
trace_plan(pole3,-pole1,pole2)
if np.abs(d(-pole3,pole1,pole2)-v)<0.001:
trace_plan(-pole3,pole1,pole2)
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
trace_plan(pole3,pole2,pole1)
if np.abs(d(pole3,pole2,-pole1)-v)<0.001:
trace_plan(pole3,pole2,-pole1)
if np.abs(d(pole3,-pole2,pole1)-v)<0.001:
trace_plan(pole3,-pole2,pole1)
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
trace_plan(pole3,pole2,pole1)
trace()
def undo_trace_plan_sym():
global M,axes,axesh,T,V,D,Dstar,G
pole_entry=ui.pole_entry.text().split(",")
pole1=np.float(pole_entry[0])
pole2=np.float(pole_entry[1])
pole3=np.float(pole_entry[2])
abc=ui.abc_entry.text().split(",")
a=np.float(abc[0])*1e-10
b=np.float(abc[1])*1e-10
c=np.float(abc[2])*1e-10
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alpha=np.float(alphabetagamma[0])*np.pi/180;
beta=np.float(alphabetagamma[1])*np.pi/180;
gamma=np.float(alphabetagamma[2])*np.pi/180;
G=np.array([[a**2,a*b*np.cos(gamma),a*c*np.cos(beta)],[a*b*np.cos(gamma),b**2,b*c*np.cos(alpha)],[a*c*np.cos(beta),b*c*np.cos(alpha),c**2]])
v=d(pole1,pole2,pole3)
undo_trace_plan(pole1,pole2,pole3)
if np.abs(alpha-np.pi/2)<0.001 and np.abs(beta-np.pi/2)<0.001 and np.abs(gamma-2*np.pi/3)<0.001:
undo_trace_plan(pole1,pole2,pole3)
undo_trace_plan(pole1,pole2,-pole3)
undo_trace_plan(pole2,pole1,pole3)
undo_trace_plan(pole2,pole1,-pole3)
undo_trace_plan(-pole1-pole2,pole2,pole3)
undo_trace_plan(-pole1-pole2,pole2,-pole3)
undo_trace_plan(pole1,-pole1-pole2,pole3)
undo_trace_plan(pole1,-pole1-pole2,-pole3)
undo_trace_plan(pole2,-pole1-pole2,pole3)
undo_trace_plan(pole2,-pole1-pole2,-pole3)
undo_trace_plan(-pole1-pole2,pole1,pole3)
undo_trace_plan(-pole1-pole2,pole1,-pole3)
else:
if np.abs(d(pole1,pole2,-pole3)-v)<0.001:
undo_trace_plan(pole1,pole2,-pole3)
if np.abs(d(pole1,-pole2,pole3)-v)<0.001:
undo_trace_plan(pole1,-pole2,pole3)
if np.abs(d(-pole1,pole2,pole3)-v)<0.001:
undo_trace_plan(-pole1,pole2,pole3)
if np.abs(d(pole2,pole1,pole3)-v)<0.001:
undo_trace_plan(pole2,pole1,pole3)
if np.abs(d(pole2,pole1,-pole3)-v)<0.001:
undo_trace_plan(pole2,pole1,-pole3)
if np.abs(d(pole2,-pole1,pole3)-v)<0.001:
undo_trace_plan(pole2,-pole1,pole3)
if np.abs(d(-pole2,pole1,pole3)-v)<0.001:
undo_trace_plan(-pole2,pole1,pole3)
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
undo_trace_plan(pole2,pole3,pole1)
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
undo_trace_plan(pole2,pole3,-pole1)
if np.abs(d(pole2,-pole3,pole1)-v)<0.001:
undo_trace_plan(pole2,-pole3,pole1)
if np.abs(d(-pole2,pole3,pole1)-v)<0.001:
undo_trace_plan(-pole2,pole3,pole1)
if np.abs(d(pole1,pole3,pole2)-v)<0.001:
undo_trace_plan(pole1,pole3,pole2)
if np.abs(d(pole1,pole3,-pole2)-v)<0.001:
undo_trace_plan(pole1,pole3,-pole2)
if np.abs(d(pole1,-pole3,pole2)-v)<0.001:
undo_trace_plan(pole1,-pole3,pole2)
if np.abs(d(-pole1,pole3,pole2)-v)<0.001:
undo_trace_plan(-pole1,pole3,pole2)
if np.abs(d(pole3,pole1,pole2)-v)<0.001:
undo_trace_plan(pole3,pole1,pole2)
if np.abs(d(pole3,pole1,-pole2)-v)<0.001:
undo_trace_plan(pole3,pole1,-pole2)
if np.abs(d(pole3,-pole1,pole2)-v)<0.001:
undo_trace_plan(pole3,-pole1,pole2)
if np.abs(d(-pole3,pole1,pole2)-v)<0.001:
undo_trace_plan(-pole3,pole1,pole2)
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
undo_trace_plan(pole3,pole2,pole1)
if np.abs(d(pole3,pole2,-pole1)-v)<0.001:
undo_trace_plan(pole3,pole2,-pole1)
if np.abs(d(pole3,-pole2,pole1)-v)<0.001:
undo_trace_plan(pole3,-pole2,pole1)
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
undo_trace_plan(pole3,pole2,pole1)
trace()
def trace_plan2(B):
global M,axes,axesh,T,V,D,Dstar,a
for h in range(0,B.shape[0]):
pole1=B[h,0]
pole2=B[h,1]
pole3=B[h,2]
Gs=np.array([pole1,pole2,pole3],float)
if B[h,3]==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(M,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1=-pole1
pole2=-pole2
pole3=-pole3
r=np.sqrt(S[0]**2+S[1]**2+S[2]**2)
A=np.zeros((2,100))
Q=np.zeros((1,2))
t=np.arctan2(S[1],S[0])*180/np.pi
w=0
ph=np.arccos(S[2]/r)*180/np.pi
for g in np.linspace(-np.pi,np.pi,100):
Aa=np.dot(Rot(t,0,0,1),np.dot(Rot(ph,0,1,0),np.array([np.sin(g),np.cos(g),0])))
A[:,w]=proj(Aa[0],Aa[1],Aa[2])*300
Q=np.vstack((Q,A[:,w]))
w=w+1
Q=np.delete(Q,0,0)
if B[h,4]==1:
a.plot(Q[:,0]+300,Q[:,1]+300,'g')
if B[h,4]==2:
a.plot(Q[:,0]+300,Q[:,1]+300,'b')
if B[h,4]==3:
a.plot(Q[:,0]+300,Q[:,1]+300,'r')
def trace_cone2(B):
global M,axes,axesh,T,V,D,Dstar,a
for h in range(0,B.shape[0]):
pole1=B[h,0]
pole2=B[h,1]
pole3=B[h,2]
i=B[h,5]
Gs=np.array([pole1,pole2,pole3],float)
if B[h,3]==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(M,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1=-pole1
pole2=-pole2
pole3=-pole3
r=np.sqrt(S[0]**2+S[1]**2+S[2]**2)
A=np.zeros((3,100))
Q=np.zeros((1,3))
t=np.arctan2(S[1],S[0])*180/np.pi
w=0
ph=np.arccos(S[2]/r)*180/np.pi
for g in np.linspace(-np.pi,np.pi,100):
Aa=np.dot(Rot(t,0,0,1),np.dot(Rot(ph,0,1,0),np.array([np.sin(g)*np.sin(i*np.pi/180),np.cos(g)*np.sin(i*np.pi/180),np.cos(i*np.pi/180)])))
A[:,w]=proj2(Aa[0],Aa[1],Aa[2])*300
Q=np.vstack((Q,A[:,w]))
w=w+1
Q=np.delete(Q,0,0)
asign = np.sign(Q[:,2])
signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)
wp=np.where(signchange==1)[0]
wp=np.append(0,wp)
wp=np.append(wp,99)
for tt in range(0, np.shape(wp)[0]-1):
if B[h,4]==1:
a.plot(Q[int(wp[tt]):int(wp[tt+1]),0]+300,Q[int(wp[tt]):int(wp[tt+1]),1]+300,'g')
if B[h,4]==2:
a.plot(Q[int(wp[tt]):int(wp[tt+1]),0]+300,Q[int(wp[tt]):int(wp[tt+1]),1]+300,'b')
if B[h,4]==3:
a.plot(Q[int(wp[tt]):int(wp[tt+1]),0]+300,Q[int(wp[tt]):int(wp[tt+1]),1]+300,'r')
####################################################################
#
# Click a pole
#
####################################################################
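# Illustrative note (added): a right-click is converted from plot coordinates
# back to a direction on the unit sphere before being indexed through M^-1 and
# Dstar^-1 (or D^-1). With the 600x600 frame normalised to x, y in [-1, 1], the
# inverse stereographic mapping used below is
#     X = 2x/(1+x**2+y**2), Y = 2y/(1+x**2+y**2), Z = (x**2+y**2-1)/(1+x**2+y**2).
# A minimal standalone sketch of that mapping, assuming numpy; the name is
# hypothetical and the GUI does not call it:
def _screen_to_direction_sketch(x_plot, y_plot, half_size=300.0):
    import numpy as np
    x = (x_plot - half_size)/half_size
    y = (y_plot - half_size)/half_size
    d = 1.0 + x**2 + y**2
    return np.array([2*x/d, 2*y/d, (x**2 + y**2 - 1)/d])  # unit vector on the sphere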
def click_a_pole(event):
global M,Dstar,D,minx,maxx,miny,maxy,a,Stc
if event.button==3:
x=event.xdata
y=event.ydata
x=(x-300)/300
y=(y-300)/300
X=2*x/(1+x**2+y**2)
Y=2*y/(1+x**2+y**2)
Z=(-1+x**2+y**2)/(1+x**2+y**2)
if Z<0:
X=-X
Y=-Y
A=np.dot(np.linalg.inv(M),np.array([X,Y,Z]))
if var_uvw()==0:
A=np.dot(np.linalg.inv(Dstar),A)*1e10*100
else:
A=np.dot(np.linalg.inv(D),A)*1e-10*100
if var_hexa()==1:
Aa=(2*A[0]-A[1])/3
Ab=(2*A[1]-A[0])/3
A[0]=Aa
A[1]=Ab
pole(A[0],A[1],A[2])
Stc=np.vstack((Stc, np.array([A[0],A[1],A[2]])))
trace()
def undo_click_a_pole():
global Stc
undo_pole(Stc[-1,0],Stc[-1,1],Stc[-1,2])
Stc=Stc[:-1,:]
trace()
####################################################################
#
# Inclination-beta indicator shown when the mouse is over the stereo
#
####################################################################
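# Note (added, illustrative): coordinates() inverts the stereographic projection
# with the same X, Y, Z expressions as in click_a_pole(), rotates by the
# work-space angle, and reports
#     latitude  = atan2(sqrt(X**2 + Z**2), Y)  (sign taken from X),
#     longitude = -atan2(Z, X), folded into [-90, 90] degrees,
# which are the two tilt-like angles written in the coordinate label.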
def coordinates(event):
t_ang=ang_work_space()*np.pi/180
if event.xdata and event.ydata:
x=event.xdata
y=event.ydata
x=(x-300)/300
y=(y-300)/300
X0=2*x/(1+x**2+y**2)
Y0=2*y/(1+x**2+y**2)
Z0=(-1+x**2+y**2)/(1+x**2+y**2)
Rxyz=np.dot(Rot(t_ang*180/np.pi,0,0,1),[X0,Y0,Z0])
X=Rxyz[0]
Y=Rxyz[1]
Z=Rxyz[2]
lat=np.arctan2(np.sqrt(X**2+Z**2),Y)*180/np.pi
if X<0:
lat=-lat
longi=-np.arctan2(Z,X)*180/np.pi
if ui.alpha_signBox.isChecked():
longi=-longi
if np.abs(longi)>90:
if longi>0:
longi=longi-180
else:
longi=longi+180
c=str(np.around(longi,decimals=1))+str(',')+str(np.around(lat,decimals=1))
ui.coord_label.setText(str(c))
########################################################
#
# Calculate interplanar distance
#
#######################################################
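# Illustrative note (added): the spacing reported below follows the general
# metric relation 1/d_hkl**2 = (h, k, l) . G^-1 . (h, k, l), with G the direct
# metric tensor built from the cell parameters. A minimal sketch, assuming
# numpy; the name is hypothetical:
def _d_spacing_sketch(h, k, l, G):
    import numpy as np
    hkl = np.array([h, k, l], dtype=float)
    return 1.0/np.sqrt(np.dot(hkl, np.dot(np.linalg.inv(G), hkl)))
# For a cubic cell of edge a, G = a**2 * identity and the sketch reduces to the
# familiar d = a/sqrt(h**2 + k**2 + l**2).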
def dhkl():
pole_entry=ui.pole_entry.text().split(",")
i=np.float(pole_entry[0])
j=np.float(pole_entry[1])
k=np.float(pole_entry[2])
abc=ui.abc_entry.text().split(",")
a=np.float(abc[0])
b=np.float(abc[1])
c=np.float(abc[2])
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alp=np.float(alphabetagamma[0])*np.pi/180
bet=np.float(alphabetagamma[1])*np.pi/180
gam=np.float(alphabetagamma[2])*np.pi/180
G=np.array([[a**2,a*b*np.cos(gam),a*c*np.cos(bet)],[a*b*np.cos(gam),b**2,b*c*np.cos(alp)],[a*c*np.cos(bet),b*c*np.cos(alp),c**2]])
d=np.around(1/(np.sqrt(np.dot(np.array([i,j,k]),np.dot(np.linalg.inv(G),np.array([i,j,k]))))), decimals=3)
ui.dhkl_label.setText(str(d))
return
####################################################################
#
# Reset view after zoom/update axes/angles
#
####################################################################
def reset_view():
global a
a.axis([minx,maxx,miny,maxy])
mpl.rcParams['font.size'] = ui.text_size_entry.text()
trace()
def tilt_axes():
global s_a,s_b,s_z
s_a,s_b,s_z=1,1,1
if ui.alpha_signBox.isChecked():
s_a=-1
if ui.beta_signBox.isChecked():
s_b=-1
if ui.theta_signBox.isChecked():
s_b=-1
return s_a,s_b,s_z
####################
#
# Define the work space (real or reciprocal) so that the tilt / y-axis angle is taken into account
#
######################
def ang_work_space():
if ui.real_space_checkBox.isChecked():
t_ang=np.float(ui.image_angle_entry.text())
else:
t_ang=np.float(ui.tilt_angle_entry.text())
return t_ang
####################################################################
#
# Enable or disable Wulff net
#
####################################################################
def wulff():
global a
if ui.wulff_button.isChecked():
fn = os.path.join(os.path.dirname(__file__), 'stereo.png')
img=Image.open(fn)
img=img.rotate(float(ang_work_space()), fillcolor='white')
img= np.array(img)
else:
img = 255*np.ones([600,600,3],dtype=np.uint8)
circle = plt.Circle((300, 300), 300, color='black',fill=False)
a.add_artist(circle)
a.plot(300,300,'+',markersize=10,mew=3,color='black')
a.imshow(img,interpolation="bicubic")
#a.axis('off')
plt.tight_layout()
figure.subplots_adjust(left=0.1,right=0.9, bottom=0.05,top=0.95, hspace=0.2,wspace=0.2)
a.figure.canvas.draw()
def text_label(A,B):
Aa=A[0]
Ab=A[1]
Ac=A[2]
if B[3]==1 & var_hexa()==1:
Aa=(2*A[0]-A[1])/3
Ab=(2*A[1]-A[0])/3
if np.sign(Aa)<0:
s0=r'$\overline{'+str(np.abs(int(Aa)))+'}$'
else:
s0=str(np.abs(int(Aa)))
if np.sign(Ab)<0:
s1=r'$\overline{'+str(np.abs(int(Ab)))+'}$'
else:
s1=str(np.abs(int(Ab)))
if np.sign(Ac)<0:
s2=r'$\overline{'+str(np.abs(int(Ac)))+'}$'
else:
s2=str(np.abs(int(Ac)))
s=s0+','+s1+','+s2
if var_hexa()==1:
if np.sign(-Aa-Ab)<0:
s3=r'$\overline{'+str(int(np.abs(-Aa-Ab)))+'}$'
else:
s3=str(int(np.abs(-Aa-Ab)))
s=s0+','+s1+','+s3+','+s2
if B[3]==1:
s='['+s+']'
return s
#######################################################################
#######################################################################
#
# Main
#
#####################################################################
####################################################################
#
# Refresh action on stereo
#
####################################################################
def trace():
global T,x,y,z,axes,axesh,M,trP,a,trC,s_a,s_b,s_z
minx,maxx=a.get_xlim()
miny,maxy=a.get_ylim()
a = figure.add_subplot(111)
a.figure.clear()
a = figure.add_subplot(111)
P=np.zeros((axes.shape[0],2))
T=np.zeros((axes.shape))
C=[]
trace_plan2(trP)
trace_cone2(trC)
schmid_trace2(tr_schmid)
tilt_axes()
for i in range(0,axes.shape[0]):
if axesh[i,6]==1:
axeshr=np.array([axesh[i,0],axesh[i,1],axesh[i,2]])
T[i,:]=np.dot(M,axeshr)
P[i,:]=proj(T[i,0],T[i,1],T[i,2])*300
if axesh[i,4]==1:
C.append('g')
if axesh[i,4]==2:
C.append('b')
if axesh[i,4]==3:
C.append('r')
s=text_label(axes[i,:],axesh[i,:])
a.annotate(s,(P[i,0]+300,P[i,1]+300))
if ui.reciprocal_checkBox.isChecked():
if np.shape(axes)[0]>0:
s0=axesh[:,6]*axesh[:,5]/np.amax(axesh[:,5])
else:
s0=axesh[:,6]
else:
s0=axesh[:,6]
if var_carre()==0:
a.scatter(P[:,0]+300,P[:,1]+300,c=C,s=s0*np.float(ui.size_var.text()))
else:
a.scatter(P[:,0]+300,P[:,1]+300,edgecolor=C, s=s0*np.float(ui.size_var.text()), facecolors='none', linewidths=1.5)
a.axis([minx,maxx,miny,maxy])
wulff()
####################################
#
# Initial plot from a given diffraction condition (diffraction vector g and tilt angles)
#
####################################
def princ():
global T,angle_alpha, angle_beta, angle_z,M,Dstar,D,g,M0,trP,axeshr,nn,a,minx,maxx,miny,maxy,trC,Stc, naxes,dmip,tr_schmid,s_a,s_b,s_z
trP=np.zeros((1,5))
trC=np.zeros((1,6))
Stc=np.zeros((1,3))
tr_schmid=np.zeros((1,3))
dmip=0
naxes=0
crist()
tilt_axes()
if ui.reciprocal_checkBox.isChecked():
crist_reciprocal()
a = figure.add_subplot(111)
a.figure.clear()
a = figure.add_subplot(111)
diff=ui.diff_entry.text().split(",")
diff1=np.float(diff[0])
diff2=np.float(diff[1])
diff3=np.float(diff[2])
tilt=ui.tilt_entry.text().split(",")
tilt_a=np.float(tilt[0])
tilt_b=np.float(tilt[1])
tilt_z=np.float(tilt[2])
inclinaison=np.float(ui.inclinaison_entry.text())
diff_ang=-ang_work_space()
d0=np.array([diff1,diff2,diff3])
if var_uvw()==0:
d=np.dot(Dstar,d0)
else:
d=np.dot(Dstar,d0)
if diff2==0 and diff1==0:
normal=np.array([1,0,0])
ang=np.pi/2
else:
normal=np.array([-d[2],0,d[0]])
ang=np.arccos(np.dot(d,np.array([0,1,0]))/np.linalg.norm(d))
R=np.dot(Rot(diff_ang,0,0,1),np.dot(Rot(-s_z*tilt_z,0,0,1),np.dot(Rot(-s_b*tilt_b,1,0,0),np.dot(Rot(-s_a*tilt_a,0,1,0),np.dot(Rot(-inclinaison,0,0,1),Rot(ang*180/np.pi, normal[0],normal[1],normal[2]))))))
P=np.zeros((axes.shape[0],2))
T=np.zeros((axes.shape))
nn=axes.shape[0]
C=[]
for i in range(0,axes.shape[0]):
if axesh[i,5]!=-1:
axeshr=np.array([axesh[i,0],axesh[i,1],axesh[i,2]])
T[i,:]=np.dot(R,axeshr)
P[i,:]=proj(T[i,0],T[i,1],T[i,2])*300
axeshr=axeshr/np.linalg.norm(axeshr)
if axesh[i,4]==1:
C.append('g')
if axesh[i,4]==2:
C.append('b')
if axesh[i,4]==3:
C.append('r')
s=text_label(axes[i,:],axesh[i,:])
a.annotate(s,(P[i,0]+300,P[i,1]+300))
if ui.reciprocal_checkBox.isChecked():
if np.shape(axes)[0]>0:
s0=axesh[:,6]*axesh[:,5]/np.amax(axesh[:,5])
else:
s0=axesh[:,6]
else:
s0=axesh[:,6]
if var_carre()==0:
a.scatter(P[:,0]+300,P[:,1]+300,c=C,s=s0*np.float(ui.size_var.text()))
else:
a.scatter(P[:,0]+300,P[:,1]+300,edgecolor=C, s=s0*np.float(ui.size_var.text()), facecolors='none', linewidths=1.5)
minx,maxx=-2,602
miny,maxy=-2,602
a.axis([minx,maxx,miny,maxy])
wulff()
angle_alpha=0
angle_beta=0
angle_z=0
g=0
ui.angle_alpha_label_2.setText('0.0')
ui.angle_beta_label_2.setText('0.0')
ui.angle_z_label_2.setText('0.0')
ui.rg_label.setText('0.0')
M=R
M0=R
euler_label()
return T,angle_alpha,angle_beta,angle_z,g,M,M0
##############################################
#
# Plot from Euler angles
#
##################################################
def princ2():
global T,angle_alpha,angle_beta,angle_z,M,Dstar,D,g,M0,trP,a,axeshr,nn,minx,maxx,miny,maxy,trC,Stc,naxes,dmip,tr_schmid,s_a,s_b,s_c
trP=np.zeros((1,5))
trC=np.zeros((1,6))
Stc=np.zeros((1,3))
tr_schmid=np.zeros((1,3))
a = figure.add_subplot(111)
a.figure.clear()
a = figure.add_subplot(111)
phi1phiphi2=ui.phi1phiphi2_entry.text().split(",")
phi1=np.float(phi1phiphi2[0])
phi=np.float(phi1phiphi2[1])
phi2=np.float(phi1phiphi2[2])
dmip=0
naxes=0
crist()
tilt_axes()
if ui.reciprocal_checkBox.isChecked():
crist_reciprocal()
P=np.zeros((axes.shape[0],2))
T=np.zeros((axes.shape))
nn=axes.shape[0]
C=[]
for i in range(0,axes.shape[0]):
axeshr=np.array([axesh[i,0],axesh[i,1],axesh[i,2]])
T[i,:]=np.dot(rotation(phi1,phi,phi2),axeshr)
P[i,:]=proj(T[i,0],T[i,1],T[i,2])*300
if color_trace()==1:
C.append('g')
axesh[i,4]=1
if color_trace()==2:
C.append('b')
axesh[i,4]=2
if color_trace()==3:
C.append('r')
axesh[i,4]=3
s=text_label(axes[i,:],axesh[i,:])
a.annotate(s,(P[i,0]+300,P[i,1]+300))
if ui.reciprocal_checkBox.isChecked():
s0=axesh[:,5]/np.amax(axesh[:,5])
else:
s0=1
if var_carre()==0:
a.scatter(P[:,0]+300,P[:,1]+300,c=C,s=s0*np.float(ui.size_var.text()))
else:
a.scatter(P[:,0]+300,P[:,1]+300,edgecolor=C, s=s0*np.float(ui.size_var.text()), facecolors='none', linewidths=1.5)
minx,maxx=-2,602
miny,maxy=-2,602
a.axis([minx,maxx,miny,maxy])
wulff()
angle_alpha=0
angle_beta=0
angle_z=0
g=0
ui.angle_alpha_label_2.setText('0.0')
ui.angle_beta_label_2.setText('0.0')
ui.angle_z_label_2.setText('0.0')
ui.rg_label.setText('0.0')
M=rotation(phi1,phi,phi2)
t=str(np.around(phi1,decimals=1))+str(',')+str(np.around(phi,decimals=1))+str(',')+str(np.around(phi2,decimals=1))
ui.angle_euler_label.setText(t)
return T,angle_alpha,angle_beta,angle_z,g,M,naxes
#######################################################################
#######################################################################
#
# GUI Menu/Dialog
#
#######################################################################
######################################################
#
# Menu
#
##########################################################
###########################################################
#
# Structure
#
##############################################################
def structure(item):
global x0, var_hexa, d_label_var, e_entry
ui.abc_entry.setText(str(item[1])+','+str(item[2])+','+str(item[3]))
ui.alphabetagamma_entry.setText(str(item[4])+','+str(item[5])+','+str(item[6]))
ii=ui.space_group_Box.findText(str(item[7]))
ui.space_group_Box.setCurrentIndex(ii)
if eval(item[4])==90 and eval(item[5])==90 and eval(item[6])==120 :
ui.hexa_button.setChecked(True)
ui.e_entry.setText('2')
ui.d_label_var.setText('3')
else:
ui.d_entry.setText('1')
ui.e_entry.setText('1')
####################################################################
#
# Measuring angle between two poles (for the angle dialog box)
#
####################################################################
def angle():
global Dstar
n1=ui_angle.n1_entry.text().split(",")
c100=np.float(n1[0])
c110=np.float(n1[1])
c120=np.float(n1[2])
n2=ui_angle.n2_entry.text().split(",")
c200=np.float(n2[0])
c210=np.float(n2[1])
c220=np.float(n2[2])
c1=np.array([c100,c110,c120])
c2=np.array([c200,c210,c220])
if ui.uvw_button.isChecked==True:
c1c=np.dot(Dstar,c1)
c2c=np.dot(Dstar,c2)
else:
c1c=np.dot(Dstar,c1)
c2c=np.dot(Dstar,c2)
the=np.arccos(np.dot(c1c,c2c)/(np.linalg.norm(c1c)*np.linalg.norm(c2c)))
thes=str(np.around(the*180/np.pi,decimals=2))
ui_angle.angle_label.setText(thes)
##################################################
#
# Schmid factor calculation (for the Schmid dialog box). Compute the Schmid factor for a
# given (b, n) slip system, or for all the crystallographically equivalent systems
#
###################################################
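# Illustrative note (added): schmid_calc() below evaluates m = cos(phi)*cos(lambda)
# after mapping n through Dstar and b through D (with the hexagonal re-indexing
# when needed) and rotating both into the sample frame. A standalone sketch for
# the cubic case, where plane normals and directions share the same Cartesian
# components, assuming numpy; the name is hypothetical:
def _schmid_factor_cubic_sketch(n, b, T):
    import numpy as np
    n = np.asarray(n, dtype=float)
    b = np.asarray(b, dtype=float)
    T = np.asarray(T, dtype=float)
    cos_phi = np.dot(n, T)/(np.linalg.norm(n)*np.linalg.norm(T))  # loading axis vs plane normal
    cos_lam = np.dot(b, T)/(np.linalg.norm(b)*np.linalg.norm(T))  # loading axis vs slip direction
    return cos_phi*cos_lam
# e.g. _schmid_factor_cubic_sketch((1, 1, 1), (0, 1, -1), (0, 0, 1)) -> about -0.41,
# the classical magnitude for fcc slip under a [001] load.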
def prod_scal(c1,c2):
global M, Dstar, D
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alp=np.float(alphabetagamma[0])*np.pi/180
bet=np.float(alphabetagamma[1])*np.pi/180
gam=np.float(alphabetagamma[2])*np.pi/180
if np.abs(alp-np.pi/2)<0.001 and np.abs(bet-np.pi/2)<0.001 and np.abs(gam-2*np.pi/3)<0.001:
c2p=np.array([0,0,0])
c2p[0]=2*c2[0]+c2[1]
c2p[1]=2*c2[1]+c2[0]
c2p[2]=c2[2]
c2c=np.dot(D, c2p)
else:
c2c=np.dot(D, c2)
c1c=np.dot(Dstar,c1)
p=np.dot(c1c,c2c)
return p
def schmid_calc(b,n, T):
global D, Dstar,M
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alp=np.float(alphabetagamma[0])*np.pi/180
bet=np.float(alphabetagamma[1])*np.pi/180
gam=np.float(alphabetagamma[2])*np.pi/180
if np.abs(alp-np.pi/2)<0.001 and np.abs(bet-np.pi/2)<0.001 and np.abs(gam-2*np.pi/3)<0.001:
b2=np.array([0,0,0])
b2[0]=2*b[0]+b[1]
b2[1]=2*b[1]+b[0]
b2[2]=b[2]
bpr=np.dot(D, b2)
else:
bpr=np.dot(D, b)
npr=np.dot(Dstar,n)
npr2=np.dot(M,npr)
bpr2=np.dot(M,bpr)
T=T/np.linalg.norm(T)
t_ang=-ang_work_space()
T=np.dot(Rot(t_ang,0,0,1),T)
anglen=np.arccos(np.dot(npr2,T)/np.linalg.norm(npr2))
angleb=np.arccos(np.dot(bpr2,T)/np.linalg.norm(bpr2))
s=np.cos(anglen)*np.cos(angleb)
return s
def schmid_pole(pole1,pole2,pole3):
global M,V,D,Dstar,G
alphabetagamma=ui.alphabetagamma_entry.text().split(",")
alp=np.float(alphabetagamma[0])*np.pi/180;
bet=np.float(alphabetagamma[1])*np.pi/180;
gam=np.float(alphabetagamma[2])*np.pi/180;
v=d(pole1,pole2,pole3)
N=np.array([pole1,pole2,pole3])
if np.abs(alp-np.pi/2)<0.001 and np.abs(bet-np.pi/2)<0.001 and np.abs(gam-2*np.pi/3)<0.001:
N=np.array([[pole1,pole2,pole3],[pole1,pole2,-pole3],[pole2,pole1,pole3],[pole2,pole1,-pole3],[-pole1-pole2,pole2,pole3],[-pole1-pole2,pole2,-pole3],[pole1,-pole1-pole2,pole3],[pole1,-pole1-pole2,-pole3],[pole2,-pole1-pole2,pole3],[pole2,-pole1-pole2,-pole3],[-pole1-pole2,pole1,pole3],[-pole1-pole2,pole1,-pole3]])
else:
if np.abs(d(pole1,pole2,-pole3)-v)<0.001:
N=np.vstack((N,np.array([pole1,pole2,-pole3])))
if np.abs(d(pole1,-pole2,pole3)-v)<0.001:
N=np.vstack((N,np.array([pole1,-pole2,pole3])))
if np.abs(d(-pole1,pole2,pole3)-v)<0.001:
N=np.vstack((N,np.array([-pole1,pole2,pole3])))
if np.abs(d(pole2,pole1,pole3)-v)<0.001:
N=np.vstack((N,np.array([pole2,pole1,pole3])))
if np.abs(d(pole2,pole1,-pole3)-v)<0.001:
N=np.vstack((N,np.array([pole2,pole1,-pole3])))
if np.abs(d(pole2,-pole1,pole3)-v)<0.001:
N=np.vstack((N,np.array([pole2,-pole1,pole3])))
if np.abs(d(-pole2,pole1,pole3)-v)<0.001:
N=np.vstack((N,np.array([-pole2,pole1,pole3])))
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
N=np.vstack((N,np.array([pole2,pole3,pole1])))
if np.abs(d(pole2,pole3,pole1)-v)<0.001:
N=np.vstack((N,np.array([pole2,pole3,-pole1])))
if np.abs(d(pole2,-pole3,pole1)-v)<0.001:
N=np.vstack((N,np.array([pole2,-pole3,pole1])))
if np.abs(d(-pole2,pole3,pole1)-v)<0.001:
N=np.vstack((N,np.array([-pole2,pole3,pole1])))
if np.abs(d(pole1,pole3,pole2)-v)<0.001:
N=np.vstack((N,np.array([pole1,pole3,pole2])))
if np.abs(d(pole1,pole3,-pole2)-v)<0.001:
N=np.vstack((N,np.array([pole1,pole3,-pole2])))
if np.abs(d(pole1,-pole3,pole2)-v)<0.001:
N=np.vstack((N,np.array([pole1,-pole3,pole2])))
if np.abs(d(-pole1,pole3,pole2)-v)<0.001:
N=np.vstack((N,np.array([-pole1,pole3,pole2])))
if np.abs(d(pole3,pole1,pole2)-v)<0.001:
N=np.vstack((N,np.array([pole3,pole1,pole2])))
if np.abs(d(pole3,pole1,-pole2)-v)<0.001:
N=np.vstack((N,np.array([pole3,pole1,-pole2])))
if np.abs(d(pole3,-pole1,pole2)-v)<0.001:
N=np.vstack((N,np.array([pole3,-pole1,pole2])))
if np.abs(d(-pole3,pole1,pole2)-v)<0.001:
N=np.vstack((N,np.array([-pole3,pole1,pole2])))
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
N=np.vstack((N,np.array([pole3,pole2,pole1])))
if np.abs(d(pole3,pole2,-pole1)-v)<0.001:
N=np.vstack((N,np.array([pole3,pole2,-pole1])))
if np.abs(d(pole3,-pole2,pole1)-v)<0.001:
N=np.vstack((N,np.array([pole3,-pole2,pole1])))
if np.abs(d(pole3,pole2,pole1)-v)<0.001:
N=np.vstack((N,np.array([pole3,pole2,pole1])))
return N
def schmid():
global D, Dstar,M
n=ui_schmid.n_entry.text().split(",")
h=np.float(n[0])
k=np.float(n[1])
l=np.float(n[2])
b=ui_schmid.b_entry.text().split(",")
u=np.float(b[0])
v=np.float(b[1])
w=np.float(b[2])
n=np.array([h,k,l])
b=np.array([u,v,w])
T0=ui_schmid.T_entry.text().split(",")
T=np.array([np.float(T0[0]),np.float(T0[1]),np.float(T0[2])])
s=schmid_calc(b,n,T)
ui_schmid.schmid_factor_label.setText(str(np.around(s,decimals=2)))
B=schmid_pole(u,v,w)
N=schmid_pole(h,k,l)
P=np.array([0,0,0,0,0,0,0])
for i in range(0,np.shape(N)[0]):
for j in range(0,np.shape(B)[0]):
if np.abs(prod_scal(N[i,:],B[j,:]))<0.0001:
s=schmid_calc(B[j,:],N[i,:],T)
R=np.array([s,N[i,0],N[i,1],N[i,2],B[j,0],B[j,1],B[j,2]])
P=np.vstack((P,R))
P=np.delete(P, (0), axis=0)
P=unique_rows(P)
P=-P
P.view('float64,i8,i8,i8,i8,i8,i8').sort(order=['f0'], axis=0)
P=-P
ui_schmid.schmid_text.setText( 's | n | b')
for k in range(0,np.shape(P)[0]):
ui_schmid.schmid_text.append(str(np.around(P[k,0],decimals=3))+ '| '+str(np.int(P[k,1]))+ str(np.int(P[k,2])) +str(np.int(P[k,3]))+ '| '+ str(np.int(P[k,4]))+str(np.int(P[k,5]))+str(np.int(P[k,6])))
#######################################
#
# Save stereo as png
#
############################################
def image_save():
filename=QtGui.QFileDialog.getSaveFileName( Index,"Save file", "", ".png")
pixmap = QtGui.QPixmap.grabWidget(canvas,55,49,710,710)
pixmap.save(str(filename)+".png")
##################################################
#
# Calculate the x, y, z directions and convert hkl <-> uvw (for the xyz and hkl/uvw dialog boxes)
#
###################################################
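# Note (added, illustrative): to_uvw()/to_hkl() below convert between a plane
# normal and a zone-axis direction through the two structure matrices,
#     [uvw] ~ D^-1 . Dstar . (hkl)   and   (hkl) ~ Dstar^-1 . D . [uvw],
# followed by the hexagonal 3-index/4-index rescaling when that setting is
# active; only the direction of the result matters, so it is rescaled for display.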
def center():
global D, Dstar,M
A=np.dot(np.linalg.inv(M),np.array([0,0,1]))
A2=np.dot(np.linalg.inv(M),np.array([1,0,0]))
A3=np.dot(np.linalg.inv(M),np.array([0,1,0]))
C=np.dot(np.linalg.inv(Dstar),A)
Zp=C/np.linalg.norm(C)
C2=np.dot(np.linalg.inv(Dstar),A2)
Xp=C2/np.linalg.norm(C2)
C3=np.dot(np.linalg.inv(Dstar),A3)
Yp=C3/np.linalg.norm(C3)
ui_xyz.X_text.setText(str(Xp[0]*100)+', '+str(Xp[1]*100)+', '+str(Xp[2]*100))
ui_xyz.Y_text.setText(str(Yp[0]*100)+', '+str(Yp[1]*100)+', '+str(Yp[2]*100))
ui_xyz.Z_text.setText(str(Zp[0]*100)+', '+str(Zp[1]*100)+', '+str(Zp[2]*100))
return Xp,Yp,Zp
def to_uvw():
global Dstar, D, M
hkl=ui_hkl_uvw.hkl_entry.text().split(",")
plane=np.array([np.float( hkl[0]),np.float(hkl[1]),np.float(hkl[2])])
plane=plane/np.linalg.norm(plane)
direction=np.dot(np.linalg.inv(D),np.dot(Dstar,plane))*1e-20
if var_hexa()==1:
na=(2*direction[0]-direction[1])/3
n2a=(2*direction[1]-direction[0])/3
direction[0]=na
direction[1]=n2a
ui_hkl_uvw.uvw_label.setText(str(np.round(100*direction[0],decimals=3))+', '+str(np.round(100*direction[1],decimals=3))+', '+str(np.round(100*direction[2],decimals=3)))
def to_hkl():
global Dstar, D, M
uvw=ui_hkl_uvw.uvw_entry.text().split(",")
direction=np.array([np.float( uvw[0]),np.float(uvw[1]),np.float(uvw[2])])
if var_hexa()==1:
na=2*direction[0]+direction[1]
n2a=2*direction[1]+direction[0]
direction[0]=na
direction[1]=n2a
direction=direction/np.linalg.norm(direction)
plane=np.dot(np.linalg.inv(Dstar),np.dot(D,direction))*1e20
ui_hkl_uvw.hkl_label.setText(str(np.round(100*plane[0],decimals=3))+', '+str(np.round(100*plane[1],decimals=3))+', '+str(np.round(100*plane[2],decimals=3)))
##########################################################################
#
# Apparent width dialog box: plot the apparent width of a plane of given normal hkl as a function of the alpha tilt angle. Also plot the trace direction with respect to the tilt axis.
#
##########################################################################
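# Note (added, illustrative): for each alpha tilt the plot below evaluates, with
# n the unit plane normal, B the beam direction and T the plane trace,
#     w/d = (n . B) / sqrt(1 - (T . B)**2),
# the apparent width normalised by the length d of the plane section through the
# foil; when a thickness t is given, d = t/sqrt(1 - (n . B)**2) and the curve is
# rescaled to w in nm. The optional second curve is the trace angle
# atan(Ti_x/Ti_y) measured from the tilt axis.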
def plot_width():
global D, Dstar, M
# ui_width.figure.clf()
B=np.dot(np.linalg.inv(M),np.array([0,0,1]))
plan=ui_width.plane_entry.text().split(",")
n=np.array([np.float(plan[0]),np.float(plan[1]),np.float(plan[2])])
nr=np.dot(Dstar,n)
nr=nr/np.linalg.norm(nr)
B=np.dot(Dstar,B)
B=B/np.linalg.norm(B)
la=np.zeros((1,41))
la2=np.zeros((2,41))
k=0
t_ang=ang_work_space()
if ui_width.surface_box.isChecked():
s0=ui_width.foil_surface.text().split(",")
s=np.array([np.float(s0[0]),np.float(s0[1]),np.float(s0[2])])
T=np.cross(nr,s)
else:
T=np.cross(nr,B)
T=T/np.linalg.norm(T)
for t in range(-40,41,2):
Mi=np.dot(Rot(t,0,1,0), np.dot(Rot(t_ang,0,0,1),M))
Bi=np.dot(np.linalg.inv(Mi),np.array([0,0,1]))
Bi=np.dot(Dstar,Bi)
Bi=Bi/np.linalg.norm(Bi)
Ti=np.dot(Mi,T)
la[0,k]=np.dot(nr,Bi)
la2[1,k]=np.arctan(Ti[0]/Ti[1])*180/np.pi
la2[0,k]=np.dot(nr,Bi)/np.sqrt(1-np.dot(T,Bi)**2)
k=k+1
ax1 = figure_width.add_subplot(111)
ax1.set_xlabel('alpha tilt angle')
ax1.tick_params('y', colors='black')
if ui_width.trace_radio_button.isChecked():
ax2 = ax1.twinx()
ax2.plot(range(-40,41,2),la2[1,:],'b-')
ax2.set_ylabel('trace angle', color='b')
ax2.tick_params('y', colors='b')
if ui_width.thickness_checkBox.isChecked():
t=np.float(ui_width.thickness.text())
d=t/np.sqrt(1-np.dot(nr,B)**2)
ax1.plot(range(-40,41,2),la2[0,:]*d,'r-')
ax1.set_ylabel('w (nm)', color='black')
else:
ax1.plot(range(-40,41,2),la2[0,:],'r-')
ax1.set_ylabel('w/d', color='black')
ax1.figure.canvas.draw()
def clear_width():
ax1 = figure_width.add_subplot(111)
ax1.figure.clf()
ax1.figure.canvas.draw()
####################################################
#
# Intersections
#
#######################################################
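# Illustrative note (added): the common direction of two planes is the cross
# product of their normals taken in Cartesian (reciprocal-space) coordinates,
# indexed back afterwards either as a direction (through D^-1) or as a plane
# (through Dstar^-1). A minimal sketch for a cubic crystal, where the conversion
# matrices drop out, assuming numpy; the name is hypothetical:
def _plane_intersection_cubic_sketch(n1, n2):
    import numpy as np
    return np.cross(np.asarray(n1, dtype=float), np.asarray(n2, dtype=float))
# e.g. _plane_intersection_cubic_sketch((1, 1, 1), (1, -1, 0)) -> [1, 1, -2],
# the zone axis shared by the two planes.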
def intersect_norm(n1,n2,d):
global Dstar, D, M
Dr=1e10*D
Dstarr=1e-10*Dstar
n1=n1/np.linalg.norm(n1)
n1=np.dot(Dstarr,n1)
if d==0:
l=ui_inter.checkBox.isChecked()
n2=n2/np.linalg.norm(n2)
n2=np.dot(Dstarr,n2)
else:
l=ui_inter.checkBox_2.isChecked()
n2=np.dot(np.linalg.inv(M), n2)
n=np.cross(n1,n2)
if l:
n=np.dot(np.linalg.inv(Dr),n)
if var_hexa()==1:
na=(2*n[0]-n[1])/3
n2a=(2*n[1]-n[0])/3
n[0]=na
n[1]=n2a
else:
n=np.dot(np.linalg.inv(Dstarr),n)
return n
def intersections_plans():
n1_plan=ui_inter.n1_entry.text().split(",")
n1=np.array([np.float( n1_plan[0]),np.float(n1_plan[1]),np.float(n1_plan[2])])
n2_plan=ui_inter.n2_entry.text().split(",")
n2=np.array([np.float( n2_plan[0]),np.float(n2_plan[1]),np.float(n2_plan[2])])
n=intersect_norm(n1,n2,0)
ui_inter.n1n2_label.setText(str(np.round(100*n[0],decimals=3))+', '+str(np.round(100*n[1],decimals=3))+', '+str(np.round(100*n[2],decimals=3)))
def intersection_dir_proj():
global M
n_proj=ui_inter.n_proj_entry.text().split(",")
n=np.array([np.float( n_proj[0]),np.float(n_proj[1]),np.float(n_proj[2])])
angle=np.float(ui_inter.angle_proj_entry.text())*np.pi/180
norm_xyz=np.array([np.cos(angle),-np.sin(angle),0])
n_intersect=intersect_norm(n,norm_xyz,1)
ui_inter.n_proj_label.setText(str(np.round(100*n_intersect[0],decimals=3))+', '+str(np.round(100*n_intersect[1],decimals=3))+', '+str(np.round(100*n_intersect[2],decimals=3)))
def intersection_cone():
global Dstar,D
Dr=1e10*D
Dstarr=1e-10*Dstar
n_c=ui_inter.n_cone_entry.text().split(",")
n=np.array([np.float( n_c[0]),np.float(n_c[1]),np.float(n_c[2])])
c_c=ui_inter.cone_entry.text().split(",")
c=np.array([np.float( c_c[0]),np.float(c_c[1]),np.float(c_c[2])])
r=np.cos(np.float(ui_inter.cone_angle_entry.text())*np.pi/180)
n=np.dot(Dstarr,n)
n=n/np.linalg.norm(n)
c=np.dot(Dstarr,c)
c=c/np.linalg.norm(c)
x1=(c[0]*n[1]**2*r + c[0]*n[2]**2*r - c[1]*n[0]*n[1]*r - c[1]*n[2]*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2) - c[2]*n[0]*n[2]*r + c[2]*n[1]*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2))/(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2)
y1=(-c[0]*n[0]*n[1]*r + c[0]*n[2]*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2) + c[1]*n[0]**2*r + c[1]*n[2]**2*r - c[2]*n[0]*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2) - c[2]*n[1]*n[2]*r)/(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2)
z1=(-r*(c[0]*n[0]*n[2] + c[1]*n[1]*n[2] - c[2]*n[0]**2 - c[2]*n[1]**2) + (-c[0]*n[1] + c[1]*n[0])*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2))/(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2)
x2=(c[0]*n[1]**2*r + c[0]*n[2]**2*r - c[1]*n[0]*n[1]*r + c[1]*n[2]*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2) - c[2]*n[0]*n[2]*r - c[2]*n[1]*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2))/(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2)
y2=(-c[0]*n[0]*n[1]*r - c[0]*n[2]*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2) + c[1]*n[0]**2*r + c[1]*n[2]**2*r + c[2]*n[0]*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2) - c[2]*n[1]*n[2]*r)/(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2)
z2=(-r*(c[0]*n[0]*n[2] + c[1]*n[1]*n[2] - c[2]*n[0]**2 - c[2]*n[1]**2) + (c[0]*n[1] - c[1]*n[0])*np.sqrt(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2 - n[0]**2*r**2 - n[1]**2*r**2 - n[2]**2*r**2))/(c[0]**2*n[1]**2 + c[0]**2*n[2]**2 - 2*c[0]*c[1]*n[0]*n[1] - 2*c[0]*c[2]*n[0]*n[2] + c[1]**2*n[0]**2 + c[1]**2*n[2]**2 - 2*c[1]*c[2]*n[1]*n[2] + c[2]**2*n[0]**2 + c[2]**2*n[1]**2)
r1=np.array([x1,y1,z1])
r2=np.array([x2,y2,z2])
if ui_inter.checkBox_3.isChecked():
r1=np.dot(np.linalg.inv(Dr),r1)
r2=np.dot(np.linalg.inv(Dr),r2)
if var_hexa()==1:
na=(2*r1[0]-r1[1])/3
n2a=(2*r1[1]-r1[0])/3
r1[0]=na
r1[1]=n2a
na2=(2*r2[0]-r2[1])/3
n2a2=(2*r2[1]-r2[0])/3
r2[0]=na2
r2[1]=n2a2
else:
r1=np.dot(np.linalg.inv(Dstarr),r1)
r2=np.dot(np.linalg.inv(Dstarr),r2)
ui_inter.cone_plane_label.setText(str(np.round(100*r1[0], decimals=3))+','+str(np.round(100*r1[1], decimals=3))+','+str(np.round(100*r1[2], decimals=3))+'\n'+str(np.round(100*r2[0], decimals=3))+','+str(np.round(100*r2[1], decimals=3))+','+str(np.round(100*r2[2], decimals=3)) )
###################################################
#
# Plot Kikuchi bands / Diffraction pattern
# The diffraction spots / Kikuchi bands are computed within the kinematical approximation, using the structure
# factor and the scattering factors indicated in the corresponding txt files. The scattering factor of every
# element is computed as a sum of Gaussian functions (see http://lampx.tugraz.at/~hadley/ss1/crystaldiffraction/atomicformfactors/formfactors.php)
#
#################################################
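#
# Illustrative sketch only (not called anywhere in this script, and the coefficient
# values below are placeholders rather than data read from scattering.txt): the
# atomic scattering factor mentioned above has the Gaussian-sum form
# f(s) = sum_i a_i*exp(-b_i*s**2) + c, with s = sin(theta)/lambda.
def _scattering_factor_sketch(s, a_coeffs=(2.0, 1.5, 1.0, 0.5), b_coeffs=(20.0, 10.0, 5.0, 1.0), c=0.1):
    s = np.asarray(s, dtype=float)
    return sum(a * np.exp(-b * s ** 2) for a, b in zip(a_coeffs, b_coeffs)) + c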
def diff_reciprocal():
global axesh_diff,axes_diff,G,V,Dstar
e=np.int(ui_kikuchi.indices_entry.text())
axes_diff=np.zeros(((2*e+1)**3-1,3))
axesh_diff=np.zeros(((2*e+1)**3-1,4))
id=0
for i in range(-e,e+1):
for j in range(-e,e+1):
for k in range(-e,e+1):
if (i,j,k)!=(0,0,0):
Ma=np.dot(Dstar,np.array([i,j,k],float))
axesh_diff[id,0:3]=Ma/np.linalg.norm(Ma)
if ui_kikuchi.diff_radioButton.isChecked():
axesh_diff[id,3]=extinction(ui.space_group_Box.currentText(),i,j,k,10000,1)[0]
axes_diff[id,:]=np.array([i,j,k])
if ui_kikuchi.kikuchi_radioButton.isChecked():
m=reduce(lambda x,y:GCD(x,y),[i,j,k])
if (np.around(i/m)==i/m) & (np.around(j/m)==j/m) & (np.around(k/m)==k/m):
axes_diff[id,:]=np.array([i,j,k])/m
else:
axes_diff[id,:]=np.array([i,j,k])
axesh_diff[id,3]
id=id+1
axesh_diff=axesh_diff[~np.all(axesh_diff[:,0:3]==0, axis=1)]
axes_diff=axes_diff[~np.all(axes_diff==0, axis=1)]
for z in range(0, np.shape(axes_diff)[0]):
I,h,k,l=extinction(ui.space_group_Box.currentText(),axes_diff[z,0],axes_diff[z,1],axes_diff[z,2],e,0)
if I>0:
axesh_diff[z,3]=I
axes_diff[z,:]=np.array([h,k,l])
else:
axesh_diff[z,0:3]=np.array([0,0,0])
axesh_diff[z,3]=1
axes_diff[z,:]=np.array([0,0,0])
axesh_diff=axesh_diff[~np.all(axesh_diff[:,0:3]==0, axis=1)]
axes_diff=axes_diff[~np.all(axes_diff==0, axis=1)]
return axes_diff, axesh_diff
def set_diff_cond():
ui_kikuchi.t_entry.setText('100')
ui_kikuchi.indices_entry.setText('5')
ui_kikuchi.angle_entry.setText('3')
ui_kikuchi.spot_size_entry.setText('100')
ui_kikuchi.error_entry.setText('1')
def set_kikuchi_cond():
ui_kikuchi.t_entry.setText(' ')
ui_kikuchi.indices_entry.setText('3')
ui_kikuchi.angle_entry.setText('15')
ui_kikuchi.spot_size_entry.setText(' ')
ui_kikuchi.error_entry.setText(' ')
def plot_kikuchi():
global M,G,V,axesh_diff,axes_diff
a_k = figure_kikuchi.add_subplot(111)
a_k.clear()
a_k = figure_kikuchi.add_subplot(111)
E=np.float(ui_kikuchi.E_entry.text())
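    # Relativistic electron wavelength (m): lambda = h/sqrt(2*m*e*V*(1 + e*V/(2*m*c**2))),
    # where the accelerating voltage is V = E*1e3 (E is entered in kV).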
    lamb=6.6e-34/np.sqrt(2*9.1e-31*1.6e-19*E*1e3*(1+1.6e-19*E*1e3/2/9.1e-31/9e16))
ang=np.float(ui_kikuchi.angle_entry.text())*np.pi/180
ap=np.sin(ang)/(1+np.cos(ang))
lim=np.tan(ang)/lamb*1e-9
m=np.max(axesh_diff[:,3])
if ui_kikuchi.diff_radioButton.isChecked():
smax=np.float(ui_kikuchi.error_entry.text())*1e9
ang_max=np.arccos(1-lamb*smax)
thick=np.float(ui_kikuchi.t_entry.text())*1e-9
for t in range(0,np.shape(axesh_diff)[0]):
T=np.dot(M,axesh_diff[t,0:3])
if np.abs(T[2])<np.sin(ang_max):
Fg=np.sqrt(axesh_diff[t,3])*1e-10
d=1/(np.sqrt(np.dot(axes_diff[t,:],np.dot(np.linalg.inv(G),axes_diff[t,:]))))
tb=np.arcsin(lamb/2/d)*180/np.pi
S=(np.dot(Rot(2*tb,-T[1],T[0],0),np.array([0,0,1]))-np.array([0,0,1]))/lamb-T/d
s=np.linalg.norm(S)
xi=np.pi*V*np.cos(tb*np.pi/180)/lamb/Fg
se=np.sqrt(s**2+1/xi**2)
I=(thick*np.pi/xi)**2*np.sinc(se*thick)**2
st=str(int(axes_diff[t,0]))+','+str(int(axes_diff[t,1]))+','+str(int(axes_diff[t,2]))
if ui_kikuchi.label_checkBox.isChecked():
a_k.annotate(st,(T[0]/d*1e-9,T[1]/d*1e-9), color="white")
a_k.scatter(T[0]/d*1e-9,T[1]/d*1e-9,s=I*np.float(ui_kikuchi.spot_size_entry.text()), color="white")
a_k.plot(0,0,'w+')
a_k.axis('equal')
a_k.axis([-lim,lim,-lim,lim])
a_k.axis('off')
if ui_kikuchi.kikuchi_radioButton.isChecked():
for t in range(0,np.shape(axesh_diff)[0]):
T=np.dot(M,axesh_diff[t,0:3])
if np.abs(T[2])<np.sin(ang):
r=np.sqrt(T[0]**2+T[1]**2+T[2]**2)
A=np.zeros((2,50))
B=np.zeros((2,50))
Qa=np.zeros((1,2))
Qb=np.zeros((1,2))
th=np.arctan2(T[1],T[0])*180/np.pi
w=0
ph=np.arccos(T[2]/r)*180/np.pi
d=1/(np.sqrt(np.dot(axes_diff[t,:],np.dot(np.linalg.inv(G),axes_diff[t,:]))))
tb=np.arcsin(lamb/2/d)*180/np.pi/2
for g in np.linspace(-np.pi/2,np.pi/2,50):
Aa=np.dot(Rot(th,0,0,1),np.dot(Rot(ph-tb,0,1,0),np.array([np.sin(g),np.cos(g),0])))
Ab=np.dot(Rot(th,0,0,1),np.dot(Rot(ph+tb,0,1,0),np.array([np.sin(g),np.cos(g),0])))
A[:,w]=proj_gnomonic(Aa[0],Aa[1],Aa[2])*300
B[:,w]=proj_gnomonic(Ab[0],Ab[1],Ab[2])*300
Qa=np.vstack((Qa,A[:,w]))
Qb=np.vstack((Qb,B[:,w]))
w=w+1
Qa=np.delete(Qa,0,0)
Qb=np.delete(Qb,0,0)
st=str(int(axes_diff[t,0]))+','+str(int(axes_diff[t,1]))+','+str(int(axes_diff[t,2]))
if ui_kikuchi.label_checkBox.isChecked():
a_k.annotate(st,(Qa[2,0]+300,Qa[2,1]+300),ha='center', va='center',rotation=th-90, color="white")
a_k.plot(Qa[:,0]+300,Qa[:,1]+300,'w-', linewidth=axesh_diff[t,3]/m)
a_k.plot(Qb[:,0]+300,Qb[:,1]+300,'w-', linewidth=axesh_diff[t,3]/m)
a_k.plot(300,300,'wo')
a_k.set_facecolor('black')
a_k.axis('equal')
a_k.axis([300*(1-ap),300*(1+ap),300*(1-ap),300*(1+ap)])
a_k.figure.canvas.draw()
##################################################
#
# Add matplotlib toolbar to zoom and pan
#
###################################################
class NavigationToolbar(NavigationToolbar):
# only display the buttons we need
toolitems = [t for t in NavigationToolbar.toolitems if
t[0] in ('Pan', 'Zoom')]
def set_message(self, msg):
pass
#############################################################
#
# Launch
#
#############################################################
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
Index = QtGui.QMainWindow()
ui = stereoprojUI.Ui_StereoProj()
ui.setupUi(Index)
figure=plt.figure()
canvas=FigureCanvas(figure)
ui.mplvl.addWidget(canvas)
toolbar = NavigationToolbar(canvas, canvas)
toolbar.setMinimumWidth(601)
# Read structure file
file_struct=open(os.path.join(os.path.dirname(__file__), 'structure.txt') ,"r")
x0=[]
for line in file_struct:
x0.append(map(str, line.split()))
i=0
file_struct.close()
for item in x0:
entry = ui.menuStructure.addAction(item[0])
Index.connect(entry,QtCore.SIGNAL('triggered()'), lambda item=item: structure(item))
i=i+1
# Read space_group file
f_space=open(os.path.join(os.path.dirname(__file__), 'space_group.txt'),"r")
x_space=[]
for line in f_space:
x_space.append(map(str, line.split()))
ui.space_group_Box.addItems(" ")
for i in range(0,len(x_space)):
if len(x_space[i])==1:
ui.space_group_Box.addItems(x_space[i])
f_space.close()
# Read scattering factor file
f_scatt=open(os.path.join(os.path.dirname(__file__), 'scattering.txt'),"r")
x_scatt=[]
for line in f_scatt:
x_scatt.append(map(str, line.split()))
f_scatt.close()
# Ctrl+z shortcut to remove clicked pole
shortcut = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+z"), Index)
shortcut.activated.connect(undo_click_a_pole)
# Connect dialog boxes and buttons
Index.connect(ui.actionSave_figure, QtCore.SIGNAL('triggered()'), image_save)
Angle=QtGui.QDialog()
ui_angle=angleUI.Ui_Angle()
ui_angle.setupUi(Angle)
Index.connect(ui.actionCalculate_angle, QtCore.SIGNAL('triggered()'), Angle.show)
ui_angle.buttonBox.rejected.connect(Angle.close)
ui_angle.buttonBox.accepted.connect(angle)
Xyz=QtGui.QDialog()
ui_xyz=xyzUI.Ui_xyz_dialog()
ui_xyz.setupUi(Xyz)
Index.connect(ui.actionCalculate_xyz, QtCore.SIGNAL('triggered()'), Xyz.show)
ui_xyz.xyz_button.clicked.connect(center)
Hkl_uvw=QtGui.QDialog()
ui_hkl_uvw=hkl_uvwUI.Ui_hkl_uvw()
ui_hkl_uvw.setupUi(Hkl_uvw)
Index.connect(ui.actionHkl_uvw, QtCore.SIGNAL('triggered()'),Hkl_uvw.show)
ui_hkl_uvw.pushButton_to_uvw.clicked.connect(to_uvw)
ui_hkl_uvw.pushButton_to_hkl.clicked.connect(to_hkl)
Schmid=QtGui.QDialog()
ui_schmid=schmidUI.Ui_Schmid()
ui_schmid.setupUi(Schmid)
Index.connect(ui.actionCalculate_Schmid_factor, QtCore.SIGNAL('triggered()'), Schmid.show)
ui_schmid.buttonBox.rejected.connect(Schmid.close)
ui_schmid.buttonBox.accepted.connect(schmid)
Width=QtGui.QDialog()
ui_width=widthUI.Ui_Width()
ui_width.setupUi(Width)
Index.connect(ui.actionCalculate_apparent_width, QtCore.SIGNAL('triggered()'), Width.show)
ui_width.buttonBox.rejected.connect(Width.close)
ui_width.buttonBox.accepted.connect(plot_width)
ui_width.clear_button.clicked.connect(clear_width)
figure_width=plt.figure()
canvas_width=FigureCanvas(figure_width)
ui_width.mplvl.addWidget(canvas_width)
toolbar_width = NavigationToolbar(canvas_width, canvas_width)
toolbar_width.setMinimumWidth(601)
Intersections = QtGui.QDialog()
ui_inter=intersectionsUI.Ui_Intersections()
ui_inter.setupUi(Intersections)
Index.connect(ui.actionCalculate_intersections, QtCore.SIGNAL('triggered()'), Intersections.show)
ui_inter.pushButton_intersections_plans.clicked.connect(intersections_plans)
ui_inter.pushButton_intersection_proj.clicked.connect(intersection_dir_proj)
ui_inter.pushButton_intersection_cone.clicked.connect(intersection_cone)
Kikuchi=QtGui.QDialog()
ui_kikuchi=kikuchiUI.Ui_Kikuchi()
ui_kikuchi.setupUi(Kikuchi)
Index.connect(ui.actionPlot_Kikuchi_lines, QtCore.SIGNAL('triggered()'), Kikuchi.show)
ui_kikuchi.buttonBox.rejected.connect(Kikuchi.close)
ui_kikuchi.buttonBox.accepted.connect(plot_kikuchi)
ui_kikuchi.Diff_button.clicked.connect(diff_reciprocal)
ui_kikuchi.diff_radioButton.clicked.connect(set_diff_cond)
ui_kikuchi.kikuchi_radioButton.clicked.connect(set_kikuchi_cond)
ui_kikuchi.E_entry.setText('200')
figure_kikuchi=plt.figure()
figure_kikuchi.patch.set_facecolor('black')
canvas_kikuchi=FigureCanvas(figure_kikuchi)
ui_kikuchi.mplvl.addWidget(canvas_kikuchi)
toolbar_kikuchi = NavigationToolbar(canvas_kikuchi, canvas_kikuchi)
toolbar_kikuchi.setMinimumWidth(100)
toolbar_kikuchi.setStyleSheet("background-color:White;")
ui.button_trace2.clicked.connect(princ2)
ui.button_trace.clicked.connect(princ)
ui.reciprocal_checkBox.stateChanged.connect(lattice_reciprocal)
ui.angle_alpha_buttonp.clicked.connect(rot_alpha_p)
ui.angle_alpha_buttonm.clicked.connect(rot_alpha_m)
ui.angle_beta_buttonp.clicked.connect(rot_beta_p)
ui.angle_beta_buttonm.clicked.connect(rot_beta_m)
ui.angle_z_buttonp.clicked.connect(rot_z_p)
ui.angle_z_buttonm.clicked.connect(rot_z_m)
ui.rot_gm_button.clicked.connect(rotgm)
ui.rot_gp_button.clicked.connect(rotgp)
ui.lock_checkButton.stateChanged.connect(lock)
ui.addpole_button.clicked.connect(addpole)
ui.undo_addpole_button.clicked.connect(undo_addpole)
ui.sym_button.clicked.connect(addpole_sym)
ui.undo_sym_button.clicked.connect(undo_sym)
ui.trace_plan_button.clicked.connect(trace_addplan)
ui.undo_trace_plan_button.clicked.connect(undo_trace_addplan)
ui.trace_cone_button.clicked.connect(trace_addcone)
ui.undo_trace_cone_button.clicked.connect(undo_trace_addcone)
ui.trace_plan_sym_button.clicked.connect(trace_plan_sym)
ui.undo_trace_plan_sym_button.clicked.connect(undo_trace_plan_sym)
ui.trace_schmid_button.clicked.connect(schmid_trace)
ui.undo_trace_schmid.clicked.connect(undo_schmid_trace)
ui.norm_button.clicked.connect(dhkl)
ui.dm_button.clicked.connect(dm)
ui.dp_button.clicked.connect(dp)
ui.reset_view_button.clicked.connect(reset_view)
figure.canvas.mpl_connect('motion_notify_event', coordinates)
figure.canvas.mpl_connect('button_press_event', click_a_pole)
# Initialize variables
dmip=0
var_lock=0
ui.lock_checkButton.setChecked(False)
ui.color_trace_bleu.setChecked(True)
ui.wulff_button.setChecked(True)
ui.wulff_button.setChecked(True)
ui.d_label_var.setText('0')
ui.text_size_entry.setText('12')
mpl.rcParams['font.size'] = ui.text_size_entry.text()
ui.abc_entry.setText('1,1,1')
ui.alphabetagamma_entry.setText('90,90,90')
ui.phi1phiphi2_entry.setText('0,0,0')
ui.e_entry.setText('1')
ui.rg_label.setText('0.0')
ui.angle_euler_label.setText(' ')
ui.size_var.setText('40')
ui.e_entry.setText('1')
ui.angle_alpha_entry.setText('5')
ui.angle_beta_entry.setText('5')
ui.angle_z_entry.setText('5')
ui.angle_beta_entry.setText('5')
ui.angle_z_entry.setText('5')
ui.tilt_angle_entry.setText('0')
ui.image_angle_entry.setText('0')
ui.d_entry.setText('1')
ui.rot_g_entry.setText('5')
ui.inclination_entry.setText('30')
a = figure.add_subplot(111)
tilt_axes()
wulff()
Index.show()
sys.exit(app.exec_())
| gpl-2.0 |
trankmichael/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
dquartul/BLonD | blond/llrf/offset_frequency.py | 2 | 7257 | # coding: utf8
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Frequency corrections to design frequency to allow fixed injection frequency
and frequency offsets**
:Authors: **Simon Albright**
'''
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as cont
class _FrequencyOffset(object):
'''
    Compute the effect of the RF frequency differing from the design frequency
'''
def __init__(self, Ring, RFStation, System = None, MainH = None):
#: | *Import Ring*
self.ring = Ring
#: | *Import RFStation*
self.rf_station = RFStation
#: | *Set system number(s) to modify, if None all are modified*
if isinstance(System, int):
self.system = [System]
elif hasattr(System, '__iter__'):
self.system = []
for s in System:
self.system.append(s)
elif System is None:
self.system = System
else:
raise TypeError("System must be int, iterable of ints or None")
if self.system and not all((isinstance(s, int) for s in self.system)):
raise TypeError("System must be int, iterable of ints or None")
#: | *Main harmonic the delta F is taken as being in reference to,
#: | if None RFStation.harmonic[0][0] is taken as the main*
if MainH is not None:
self.mainH = MainH
else:
self.mainH = RFStation.harmonic[0][0]
def set_frequency(self, NewFrequencyProgram):
'''
Set new frequency program
'''
        #: | *Check if frequency is passed as array of [time, freq]*
if isinstance(NewFrequencyProgram, np.ndarray):
if NewFrequencyProgram.shape[0] == 2:
end_turn = np.where(self.ring.cycle_time >= \
NewFrequencyProgram[0][-1])[0][0]
NewFrequencyProgram = np.interp(self.ring.cycle_time[:end_turn],\
NewFrequencyProgram[0], NewFrequencyProgram[1])
#: | *Store new frequency as numpy array relative to the main harmonic*
self.new_frequency = np.array(NewFrequencyProgram)/self.mainH
self.end_turn = len(self.new_frequency)
#: | *Store design frequency during offset*
self.design_frequency = self.rf_station.omega_rf_d[:,:self.end_turn]
def calculate_phase_slip(self):
'''
Calculate the phase slippage resulting from the frequency offset for \
each RF system
'''
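        # Per-turn phase slip of each RF system relative to the design programme:
        # delta_phi = 2*pi*h*(omega_rf_new - omega_rf_design)/omega_rf_design,
        # i.e. the harmonic number times the relative frequency offset, in rad/turn.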
delta_phi = (2*np.pi * self.rf_station.harmonic[:,:self.end_turn]
* (self.rf_station.harmonic[:,:self.end_turn]
* self.new_frequency
- self.design_frequency)
/ self.design_frequency)
self.phase_slippage = np.cumsum(delta_phi, axis=1)
def apply_new_frequency(self):
'''
Sets the RF frequency and phase
'''
if self.system is None:
self.rf_station.omega_rf[:, :self.end_turn] = \
(self.rf_station.harmonic[:, :self.end_turn]
* self.new_frequency)
self.rf_station.phi_rf[:, :self.end_turn] += self.phase_slippage
for n in range(self.rf_station.n_rf):
self.rf_station.phi_rf[n, self.end_turn:] \
+= self.phase_slippage[n,-1]
else:
for system in self.system:
self.rf_station.omega_rf[system, :self.end_turn] \
= (self.rf_station.harmonic[system, :self.end_turn]
* self.new_frequency)
self.rf_station.phi_rf[system, :self.end_turn] \
+= self.phase_slippage[system]
self.rf_station.phi_rf[system, self.end_turn:] \
+= self.phase_slippage[system,-1]
class FixedFrequency(_FrequencyOffset):
'''
    Compute the effect of a fixed RF frequency that differs from the frequency
    given by the momentum program at the start of the cycle.
'''
def __init__(self, Ring, RFStation, FixedFrequency, FixedDuration,
TransitionDuration, transition = 1):
_FrequencyOffset.__init__(self, Ring, RFStation)
#: | *Set value of fixed frequency*
self.fixed_frequency = FixedFrequency
#: | *Duration of fixed frequency*
self.fixed_duration = FixedDuration
#: | *Duration of transition to design frequency*
self.transition_duration = TransitionDuration
self.end_fixed_turn = np.where(self.ring.cycle_time >= \
self.fixed_duration)[0][0]
self.end_transition_turn = np.where(self.ring.cycle_time >= \
(self.fixed_duration + self.transition_duration))[0][0]
self.end_frequency = self.rf_station.omega_rf_d[0, self.end_transition_turn]
if transition == 1:
self.calculate_frequency_prog = self.transition_1
self.compute()
def compute(self):
self.calculate_frequency_prog()
self.set_frequency(self.frequency_prog)
self.calculate_phase_slip()
self.apply_new_frequency()
def linear_calculate_frequency_prog(self):
'''
Calculate the fixed and transition frequency programs turn by turn
'''
fixed_frequency_prog = np.ones(self.end_fixed_turn)*self.fixed_frequency
transition_frequency_prog = np.linspace(float(self.fixed_frequency),
float(self.end_frequency),
(self.end_transition_turn
- self.end_fixed_turn))
self.frequency_prog = np.concatenate((fixed_frequency_prog, \
transition_frequency_prog))
def transition_1(self):
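        # Cubic blend f(t) = A*t**3 + B*t**2 + f_fixed over the transition window:
        # constA and constB are chosen so that the frequency leaves the fixed value
        # with zero slope and matches both the design frequency and its time
        # derivative at the end of the transition.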
t1 = (self.ring.cycle_time[self.end_transition_turn]
- self.ring.cycle_time[self.end_fixed_turn])
f1 = self.end_frequency
f1Prime = (np.gradient(self.rf_station.omega_rf_d[0])
/np.gradient(self.ring.cycle_time))[self.end_transition_turn]
constA = (t1*f1Prime - 2*(f1 - self.fixed_frequency))/t1**3
constB = - (t1*f1Prime - 3*(f1 - self.fixed_frequency))/t1**2
transTime = (self.ring.cycle_time[self.end_fixed_turn
:self.end_transition_turn]
- self.ring.cycle_time[self.end_fixed_turn])
transition_freq = (constA * transTime**3 + constB * transTime**2
+ self.fixed_frequency)
self.frequency_prog = np.concatenate((np.ones(self.end_fixed_turn)
* self.fixed_frequency
, transition_freq))
| gpl-3.0 |
ClimbsRocks/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 20 | 3529 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore share
large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of
the transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Project the transformed data to 2d with truncated SVD for visualization
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
econ-ark/HARK | examples/ConsIndShockModel/KinkedRconsumerType.py | 1 | 15350 | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: collapsed,code_folding
# cell_metadata_json: true
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: econ-ark-3.8
# language: python
# name: econ-ark-3.8
# ---
# %% [markdown]
# # KinkedRconsumerType: Consumption-saving model with idiosyncratic income shocks and different interest rates on borrowing and saving
# %% {"code_folding": [0]}
# Initial imports and notebook setup, click arrow to show
import matplotlib.pyplot as plt
import numpy as np
from HARK.ConsumptionSaving.ConsIndShockModel import KinkedRconsumerType
from HARK.utilities import plot_funcs_der, plot_funcs
mystr = lambda number: "{:.4f}".format(number)
# %% [markdown]
# The module `HARK.ConsumptionSaving.ConsIndShockModel` concerns consumption-saving models with idiosyncratic shocks to (non-capital) income. All of the models assume CRRA utility with geometric discounting and no bequest motive, and that income shocks are either fully transitory or fully permanent.
#
# `ConsIndShockModel` currently includes three models:
# 1. A very basic "perfect foresight" model with no uncertainty.
# 2. A model with risk over transitory and permanent income shocks.
# 3. The model described in (2), with an interest rate for debt that differs from the interest rate for savings.
#
# This notebook provides documentation for the third of these models.
# $\newcommand{\CRRA}{\rho}$
# $\newcommand{\DiePrb}{\mathsf{D}}$
# $\newcommand{\PermGroFac}{\Gamma}$
# $\newcommand{\Rfree}{\mathsf{R}}$
# $\newcommand{\DiscFac}{\beta}$
# %% [markdown]
# ## Statement of "kinked R" model
#
# Consider a small extension to the model faced by `IndShockConsumerType`s: the interest rate on borrowing (when $a_t < 0$) is greater than the interest rate on saving (when $a_t > 0$). Consumers who face this kind of problem are represented by the $\texttt{KinkedRconsumerType}$ class.
#
# For a full theoretical treatment, this model is analyzed in [A Theory of the Consumption Function, With
# and Without Liquidity Constraints](http://www.econ2.jhu.edu/people/ccarroll/ATheoryv3JEP.pdf)
# and its [expanded edition](http://www.econ2.jhu.edu/people/ccarroll/ATheoryv3NBER.pdf).
#
# Continuing to work with *normalized* variables (e.g. $m_t$ represents the level of market resources divided by permanent income), the "kinked R" model can be stated as:
#
# \begin{eqnarray*}
# v_t(m_t) &=& \max_{c_t} {~} U(c_t) + \DiscFac (1-\DiePrb_{t+1}) \mathbb{E}_{t} \left[ (\PermGroFac_{t+1}\psi_{t+1})^{1-\CRRA} v_{t+1}(m_{t+1}) \right], \\
# a_t &=& m_t - c_t, \\
# a_t &\geq& \underline{a}, \\
# m_{t+1} &=& \Rfree_t/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \\
# \Rfree_t &=& \cases{\Rfree_{boro} \texttt{ if } a_t < 0 \\
# \Rfree_{save} \texttt{ if } a_t \geq 0},\\
# \Rfree_{boro} &>& \Rfree_{save}, \\
# (\psi_{t+1},\theta_{t+1}) &\sim& F_{t+1}, \\
# \mathbb{E}[\psi]=\mathbb{E}[\theta] &=& 1.
# \end{eqnarray*}
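# %% [markdown]
# As a quick illustration of the only new ingredient relative to the basic income-shocks model, the sketch below (a stand-alone helper for this notebook, not a function provided by HARK) evaluates the kinked return factor $\Rfree_t$ as a function of end-of-period assets $a_t$, using the same $\Rfree_{boro}=1.20$ and $\Rfree_{save}=1.01$ that appear in the example dictionary defined later in this notebook.
# %%
def kinked_R(a, Rboro=1.20, Rsave=1.01):
    """Return factor applied to end-of-period assets: Rboro when borrowing, Rsave when saving."""
    a = np.asarray(a, dtype=float)
    return np.where(a < 0.0, Rboro, Rsave)
kinked_R(np.array([-0.5, 0.0, 0.5]))  # -> array([1.2 , 1.01, 1.01])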
# %% [markdown]
# ## Solving the "kinked R" model
#
# The solution method for the "kinked R" model is nearly identical to that of the `IndShockConsumerType` on which it is based, using the endogenous grid method; see the notebook for that model for more information. The only significant difference is that the interest factor varies by $a_t$ across the exogenously chosen grid of end-of-period assets, with a discontinuity in $\Rfree$ at $a_t=0$.
#
# To correctly handle this, the `solveConsKinkedR` function inserts *two* instances of $a_t=0$ into the grid of $a_t$ values: the first corresponding to $\Rfree_{boro}$ ($a_t = -0$) and the other corresponding to $\Rfree_{save}$ ($a_t = +0$). The two consumption levels (and corresponding endogenous $m_t$ gridpoints) represent points at which the agent's first order condition is satisfied at *exactly* $a_t=0$ at the two different interest factors. In between these two points, the first order condition *does not hold with equality*: the consumer will end the period with exactly $a_t=0$, consuming $c_t=m_t$, but his marginal utility of consumption exceeds the marginal value of saving and is less than the marginal value of borrowing. This generates a consumption function with *two* kinks: two concave portions (for borrowing and saving) with a linear segment of slope 1 in between.
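# %% [markdown]
# A deliberately simplified sketch of that grid construction is shown below (the actual `solveConsKinkedR` routine does considerably more work); the point is just the doubled $a_t=0$ gridpoint, with $\Rfree_{boro}$ attached to the borrowing side and $\Rfree_{save}$ to the saving side.
# %%
aXtraGrid = np.array([0.001, 0.01, 0.1, 1.0, 10.0])  # illustrative end-of-period asset gridpoints
aNrmGrid = np.concatenate((-aXtraGrid[::-1], [-0.0, 0.0], aXtraGrid))  # borrowing region, doubled zero, saving region
Rvec = np.array([1.20] * (aXtraGrid.size + 1) + [1.01] * (aXtraGrid.size + 1))  # Rboro up through a=-0, Rsave from a=+0
list(zip(aNrmGrid, Rvec))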
# %% [markdown]
# ## Example parameter values to construct an instance of KinkedRconsumerType
#
# The parameters required to create an instance of `KinkedRconsumerType` are nearly identical to those for `IndShockConsumerType`. The only difference is that the parameter $\texttt{Rfree}$ is replaced with $\texttt{Rboro}$ and $\texttt{Rsave}$.
#
# While the parameter $\texttt{CubicBool}$ is required to create a valid `KinkedRconsumerType` instance, it must be set to `False`; cubic spline interpolation has not yet been implemented for this model. In the future, this restriction will be lifted.
#
# | Parameter | Description | Code | Example value | Time-varying? |
# | :---: | --- | --- | --- | --- |
# | $\DiscFac$ |Intertemporal discount factor | $\texttt{DiscFac}$ | $0.96$ | |
# | $\CRRA $ |Coefficient of relative risk aversion | $\texttt{CRRA}$ | $2.0$ | |
# | $\Rfree_{boro}$ | Risk free interest factor for borrowing | $\texttt{Rboro}$ | $1.20$ | |
# | $\Rfree_{save}$ | Risk free interest factor for saving | $\texttt{Rsave}$ | $1.01$ | |
# | $1 - \DiePrb_{t+1}$ |Survival probability | $\texttt{LivPrb}$ | $[0.98]$ | $\surd$ |
# |$\PermGroFac_{t+1}$|Permanent income growth factor|$\texttt{PermGroFac}$| $[1.01]$ | $\surd$ |
# | $\sigma_\psi $ | Standard deviation of log permanent income shocks | $\texttt{PermShkStd}$ | $[0.1]$ |$\surd$ |
# | $N_\psi $ | Number of discrete permanent income shocks | $\texttt{PermShkCount}$ | $7$ | |
# | $\sigma_\theta $ | Standard deviation of log transitory income shocks | $\texttt{TranShkStd}$ | $[0.2]$ | $\surd$ |
# | $N_\theta $ | Number of discrete transitory income shocks | $\texttt{TranShkCount}$ | $7$ | |
# | $\mho$ | Probability of being unemployed and getting $\theta=\underline{\theta}$ | $\texttt{UnempPrb}$ | $0.05$ | |
# | $\underline{\theta} $ | Transitory shock when unemployed | $\texttt{IncUnemp}$ | $0.3$ | |
# | $\mho^{Ret}$ | Probability of being "unemployed" when retired | $\texttt{UnempPrb}$ | $0.0005$ | |
# | $\underline{\theta}^{Ret} $ | Transitory shock when "unemployed" and retired | $\texttt{IncUnemp}$ | $0.0$ | |
# | $(none)$ | Period of the lifecycle model when retirement begins | $\texttt{T_retire}$ | $0$ | |
# | $(none)$ | Minimum value in assets-above-minimum grid | $\texttt{aXtraMin}$ | $0.001$ | |
# | $(none)$ | Maximum value in assets-above-minimum grid | $\texttt{aXtraMax}$ | $20.0$ | |
# | $(none)$ | Number of points in base assets-above-minimum grid | $\texttt{aXtraCount}$ | $48$ | |
# | $(none)$ | Exponential nesting factor for base assets-above-minimum grid | $\texttt{aXtraNestFac}$ | $3$ | |
# | $(none)$ | Additional values to add to assets-above-minimum grid | $\texttt{aXtraExtra}$ | $None$ | |
# | $\underline{a} $ | Artificial borrowing constraint (normalized) | $\texttt{BoroCnstArt}$ | $None$ | |
# | $(none) $ |Indicator for whether $\texttt{vFunc}$ should be computed | $\texttt{vFuncBool}$ | $True$ | |
# | $(none)$ |Indicator for whether $\texttt{cFunc}$ should use cubic splines | $\texttt{CubicBool}$ | $False$ | |
# |$T$| Number of periods in this type's "cycle" |$\texttt{T_cycle}$| $1$ | |
# |(none)| Number of times the "cycle" occurs |$\texttt{cycles}$| $0$ | |
#
# These example parameters are almost identical to those used for `IndShockExample` in the prior notebook, except that the interest rate on borrowing is 20% (like a credit card), and the interest rate on saving is 1%. Moreover, the artificial borrowing constraint has been set to `None`. The cell below defines a parameter dictionary with these example values.
# %% {"code_folding": [0]}
KinkedRdict = { # Click the arrow to expand this parameter dictionary
# Parameters shared with the perfect foresight model
"CRRA": 2.0, # Coefficient of relative risk aversion
"DiscFac": 0.96, # Intertemporal discount factor
"LivPrb": [0.98], # Survival probability
"PermGroFac": [1.01], # Permanent income growth factor
"BoroCnstArt": None, # Artificial borrowing constraint; imposed minimum level of end-of period assets
# New parameters unique to the "kinked R" model
"Rboro": 1.20, # Interest factor on borrowing (a < 0)
"Rsave": 1.01, # Interest factor on saving (a > 0)
# Parameters that specify the income distribution over the lifecycle (shared with IndShockConsumerType)
"PermShkStd": [0.1], # Standard deviation of log permanent shocks to income
"PermShkCount": 7, # Number of points in discrete approximation to permanent income shocks
"TranShkStd": [0.2], # Standard deviation of log transitory shocks to income
"TranShkCount": 7, # Number of points in discrete approximation to transitory income shocks
"UnempPrb": 0.05, # Probability of unemployment while working
"IncUnemp": 0.3, # Unemployment benefits replacement rate
"UnempPrbRet": 0.0005, # Probability of "unemployment" while retired
"IncUnempRet": 0.0, # "Unemployment" benefits when retired
"T_retire": 0, # Period of retirement (0 --> no retirement)
"tax_rate": 0.0, # Flat income tax rate (legacy parameter, will be removed in future)
# Parameters for constructing the "assets above minimum" grid (shared with IndShockConsumerType)
"aXtraMin": 0.001, # Minimum end-of-period "assets above minimum" value
"aXtraMax": 20, # Maximum end-of-period "assets above minimum" value
"aXtraCount": 48, # Number of points in the base grid of "assets above minimum"
"aXtraNestFac": 3, # Exponential nesting factor when constructing "assets above minimum" grid
"aXtraExtra": [None], # Additional values to add to aXtraGrid
# A few other paramaters (shared with IndShockConsumerType)
"vFuncBool": True, # Whether to calculate the value function during solution
"CubicBool": False, # Preference shocks currently only compatible with linear cFunc
"T_cycle": 1, # Number of periods in the cycle for this agent type
# Parameters only used in simulation (shared with PerfForesightConsumerType)
"AgentCount": 10000, # Number of agents of this type
"T_sim": 500, # Number of periods to simulate
"aNrmInitMean": -6.0, # Mean of log initial assets
"aNrmInitStd": 1.0, # Standard deviation of log initial assets
"pLvlInitMean": 0.0, # Mean of log initial permanent income
"pLvlInitStd": 0.0, # Standard deviation of log initial permanent income
"PermGroFacAgg": 1.0, # Aggregate permanent income growth factor
"T_age": None, # Age after which simulated agents are automatically killed
}
# %% [markdown]
# ## Solving and examining the solution of the "kinked R" model
#
# The cell below creates an infinite horizon instance of `KinkedRconsumerType` and solves its model by calling its `solve` method.
# %%
KinkyExample = KinkedRconsumerType(**KinkedRdict)
KinkyExample.cycles = 0 # Make the example infinite horizon
KinkyExample.solve()
# %% [markdown]
# An element of a `KinkedRconsumerType`'s solution will have all the same attributes as that of a `IndShockConsumerType`; see that notebook for details.
#
# We can plot the consumption function of our "kinked R" example, as well as the MPC:
# %%
print("Kinked R consumption function:")
plot_funcs(KinkyExample.solution[0].cFunc, KinkyExample.solution[0].mNrmMin, 5)
print("Kinked R marginal propensity to consume:")
plot_funcs_der(KinkyExample.solution[0].cFunc, KinkyExample.solution[0].mNrmMin, 5)
# %% [markdown]
# ## Simulating the "kinked R" model
#
# In order to generate simulated data, an instance of `KinkedRconsumerType` needs to know how many agents there are that share these particular parameters (and are thus *ex ante* homogeneous), the distribution of states for newly "born" agents, and how many periods to simulate. These simulation parameters are described in the table below, along with example values.
#
# | Description | Code | Example value |
# | :---: | --- | --- |
# | Number of consumers of this type | $\texttt{AgentCount}$ | $10000$ |
# | Number of periods to simulate | $\texttt{T_sim}$ | $500$ |
# | Mean of initial log (normalized) assets | $\texttt{aNrmInitMean}$ | $-6.0$ |
# | Stdev of initial log (normalized) assets | $\texttt{aNrmInitStd}$ | $1.0$ |
# | Mean of initial log permanent income | $\texttt{pLvlInitMean}$ | $0.0$ |
# | Stdev of initial log permanent income | $\texttt{pLvlInitStd}$ | $0.0$ |
# | Aggregate productivity growth factor | $\texttt{PermGroFacAgg}$ | $1.0$ |
# | Age after which consumers are automatically killed | $\texttt{T_age}$ | $None$ |
#
# Here, we will simulate 10,000 consumers for 500 periods. All newly born agents will start with permanent income of exactly $P_t = 1.0 = \exp(\texttt{pLvlInitMean})$, as $\texttt{pLvlInitStd}$ has been set to zero; they will have essentially zero assets at birth, as $\texttt{aNrmInitMean}$ is $-6.0$; assets will be less than $1\%$ of permanent income at birth.
#
# These example parameter values were already passed as part of the parameter dictionary that we used to create `KinkyExample`, so it is ready to simulate. We need to set the `track_vars` attribute to indicate the variables for which we want to record a *history*.
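# %% [markdown]
# As a quick sanity check on that last claim: with $\texttt{aNrmInitMean}=-6.0$ and $\texttt{aNrmInitStd}=1.0$, the *median* newborn holds $\exp(-6)\approx 0.0025$ units of normalized assets, well under $1\%$ of permanent income.
# %%
np.exp(KinkedRdict["aNrmInitMean"])  # approximately 0.0025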
# %%
KinkyExample.track_vars = ['mNrm', 'cNrm', 'pLvl']
KinkyExample.initialize_sim()
KinkyExample.simulate()
# %% [markdown]
# We can plot the average (normalized) market resources in each simulated period:
# %%
plt.plot(np.mean(KinkyExample.history['mNrm'], axis=1))
plt.xlabel("Time")
plt.ylabel("Mean market resources")
plt.show()
# %% [markdown]
# Now let's plot the distribution of (normalized) assets $a_t$ for the current population, after simulating for $500$ periods; this should be fairly close to the long run distribution:
# %%
plt.plot(np.sort(KinkyExample.state_now['aNrm']), np.linspace(0.0, 1.0, KinkyExample.AgentCount))
plt.xlabel("End-of-period assets")
plt.ylabel("Cumulative distribution")
plt.ylim(-0.01, 1.01)
plt.show()
# %% [markdown]
# We can see there's a significant point mass of consumers with *exactly* $a_t=0$; these are consumers who do not find it worthwhile to give up a bit of consumption to begin saving (because $\Rfree_{save}$ is too low), and also are not willing to finance additional consumption by borrowing (because $\Rfree_{boro}$ is too high).
#
# The smaller point masses in this distribution are due to $\texttt{HARK}$ drawing simulated income shocks from the discretized distribution, rather than the "true" lognormal distributions of shocks. For consumers who ended $t-1$ with $a_{t-1}=0$ in assets, there are only 8 values the transitory shock $\theta_{t}$ can take on, and thus only 8 values of $m_t$ (and hence of $a_t$) they can achieve; the value of $\psi_t$ is immaterial to $m_t$ when $a_{t-1}=0$.
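# %% [markdown]
# The size of that point mass can be checked directly from the simulated cross-section; the exact share depends on the parameters and the random seed, and the tolerance below is an arbitrary choice, so treat this only as a rough diagnostic.
# %%
np.mean(np.abs(KinkyExample.state_now['aNrm']) < 1e-6)  # rough share of agents ending the period at (essentially) zero assets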
# %%
# %%
# %%
| apache-2.0 |
drasmuss/numpy | numpy/linalg/linalg.py | 11 | 75845 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
    The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
        if t != result_t:
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
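# Illustrative sketch (helper name chosen for exposition): the least-squares
# use of qr() described in the docstring above. With A = q r, the solution of
# the over-determined system is x = inv(r) @ q.T @ b; in practice lstsq()
# would be used instead.
def _qr_least_squares_sketch():
    import numpy as np
    A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]], dtype=float)
    b = np.array([1, 0, 2, 1], dtype=float)
    q, r = np.linalg.qr(A)
    return np.dot(np.linalg.inv(r), np.dot(q.T, b))  # approximately [0., 1.]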
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
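# Illustrative sketch (helper name chosen for exposition): with p=None the
# branch above returns the ratio of the largest to the smallest singular
# value, so cond(a) agrees with that ratio computed directly from svd().
def _cond_usage_sketch():
    import numpy as np
    a = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
    s = np.linalg.svd(a, compute_uv=False)
    return np.allclose(np.linalg.cond(a), s[0] / s[-1])  # expected: True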
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
        If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, inexact):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
    # cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
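# Illustrative sketch restating the cost comparison above with the shapes
# from the multi_dot docstring (A is 10x100, B is 100x5, C is 5x50): the
# (AB)C parenthesization needs 7500 scalar multiplications versus 75000 for
# A(BC), so the first branch is taken. Shapes here are plain tuples used only
# for exposition.
def _multi_dot_three_cost_sketch():
    A_shape, B_shape, C_shape = (10, 100), (100, 5), (5, 50)
    cost1 = (A_shape[0] * A_shape[1] * B_shape[1] +   # (AB):  10*100*5  = 5000
             A_shape[0] * B_shape[1] * C_shape[1])    # (--)C: 10*5*50   = 2500
    cost2 = (B_shape[0] * B_shape[1] * C_shape[1] +   # (BC):  100*5*50  = 25000
             A_shape[0] * A_shape[1] * C_shape[1])    # A(--): 10*100*50 = 50000
    return cost1, cost2  # (7500, 75000)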
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| bsd-3-clause |
MIREL-UNC/wikipedia-ner | svm.py | 1 | 1752 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import numpy as np
import os
import pandas as pd
import sys
from sklearn.externals import joblib
from sklearn.linear_model import SGDClassifier
from wikipedianer.dataset import HandcraftedFeaturesDataset
from wikipedianer.pipeline.util import CL_ITERATIONS
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dataset_path',
type=str)
parser.add_argument('labels_path',
type=str)
parser.add_argument('indices_path',
type=str)
parser.add_argument('results_path',
type=str)
args = parser.parse_args()
dataset = HandcraftedFeaturesDataset(args.dataset_path, args.labels_path, args.indices_path)
for iidx, iteration in enumerate(CL_ITERATIONS[:-1]):
print('Running for iteration %s' % iteration, file=sys.stderr)
model = SGDClassifier(verbose=1, n_jobs=12)
model.fit(dataset.train_dataset, dataset.train_labels[:, iidx])
print('Getting results', file=sys.stderr)
y_true = dataset.test_labels[:, iidx]
y_pred = model.predict(dataset.test_dataset)
results = pd.DataFrame(np.vstack([y_true, y_pred]).T, columns=['true', 'prediction'])
print('Saving results', file=sys.stderr)
results.to_csv(os.path.join(args.results_path, 'test_predictions_SVM_%s.csv' % iteration), index=False)
print('Saving model', file=sys.stderr)
joblib.dump(model, os.path.join(args.results_path, 'model_SVM_%s.pkl' % iteration))
print('Finished all iterations', file=sys.stderr)
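    # Illustrative invocation sketch (file names below are placeholders, not
    # paths from the original project):
    #
    #     python svm.py features.npz labels.pickle indices.npz results/
    #
    # which trains one SGDClassifier per CL iteration and writes
    # test_predictions_SVM_<iteration>.csv and model_SVM_<iteration>.pkl
    # into the results directory.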
| gpl-3.0 |
Safery/RSX-Tracker | v0.0/RSX_TrackerV2.py | 1 | 4286 | import matplotlib.pyplot as plt
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, \
AnnotationBbox
from matplotlib.cbook import get_sample_data
import matplotlib.image as mpimg
import numpy as np
fig, ax = plt.subplots()
class RSX_Mapper():
'''Initiate RSX_Mapper Class'''
def __init__(self, rcord=None, lcord=None):
'''(RSX_Mapper, [float, float], [float, float]) -> NoneType
'''
self.MapName = []
self.rcord = []
self.lcord = []
if ((rcord == None) or (lcord == None)):
cords = get_cord()
self.rcord = cords[0]
self.lcord = cords[1]
return None
else:
            self.rcord = rcord
            self.lcord = lcord
ax.set_xlim(float(lcord[0]),float(rcord[0]))
ax.set_ylim(float(lcord[1]),float(rcord[1]))
return None
def __str__(self):
'''(RSX_Mapper) -> str
'''
pass
def set_custom_cord(self, rcord, lcord):
'''
'''
ax.set_xlim(float(lcord[0]),float(rcord[0]))
ax.set_ylim(float(lcord[1]),float(rcord[1]))
self.lcord = [float(lcord[0]),float(rcord[0])]
self.rcord = [float(lcord[1]),float(rcord[1])]
plt.draw()
plt.show()
return None
def set_range(self, auto=True, ticks=None):
'''
'''
if (auto == True):
ax.set_xticks([0.000964875])
ax.set_yticks([0.000964875])
else:
ax.set_xticks([float(ticks)])
ax.set_yticks([float(ticks)])
return None
    def set_img(self, longi=None, lat=None, MapName='map.png', lcord=None, rcord=None):
'''
'''
if ((longi == None) or (lat == None)):
get_inp = input('Must provide Longitude and Latitude [Lat, Longi]\n >>> ')
            longi = get_inp[1]
            lat = get_inp[0]
if (MapName in self.MapName):
get_inp = input('Map Image already exists. Continue (y/n)?\n >>> ')
if (get_inp == 'y'):
img = mpimg.imread("img/"+str(MapName))
imagebox = OffsetImage(img, zoom=0.5)
ab = AnnotationBbox(imagebox,
(longi, lat), xybox=(0, 0),
xycoords='data',
boxcoords="offset points",
pad=0, frameon=False)
self.MapName.append("img/"+str(MapName))
ax.add_artist(ab)
plt.draw()
else:
return 'No Map Image added.'
else:
'''img = mpimg.imread("img/"+str(MapName))
imagebox = OffsetImage(img, zoom=0.5)
ab = AnnotationBbox(imagebox, (longi, lat),
xybox=(0, 0),
xycoords='data',
boxcoords="offset points", pad=0, frameon=False)
self.MapName.append("img/"+str(MapName))
ax.add_artist(ab)
plt.draw()
'''
img = mpimg.imread("img/"+str(MapName))
plt.imshow(img, extent = [float(lcord[0]),float(rcord[0]),float(lcord[1]),float(rcord[1])])
plt.show()
def get_cord():
'''
'''
# Gets the top right Coordinate (Longitude and Latitude)
_top_rcord = []
    get_input = input('What is the top right Coordinate?\n>>> ')
_top_rcord.append(get_input[:int(str(get_input.find(',')))])
_top_rcord.append(get_input[int(str(get_input.find(',')))+1:])
# Gets the bottom left Coordinate (Longitude and Latitude)
_bottom_rcord = []
    get_input = input('What is the bottom left Coordinate?\n>>> ')
_bottom_rcord.append(get_input[:int(str(get_input.find(',')))])
_bottom_rcord.append(get_input[int(str(get_input.find(',')))+1:])
# Sets the axis length
ax.set_xlim(float(_bottom_rcord[0]),float(_top_rcord[0]))
ax.set_ylim(float(_bottom_rcord[1]),float(_top_rcord[1]))
return [_top_rcord, _bottom_rcord]
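# Illustrative usage sketch (coordinate values are placeholders and an
# 'img/map.png' file is assumed to exist): build a mapper with explicit
# corner coordinates, then overlay the map image on the axes.
def _rsx_mapper_usage_sketch():
    mapper = RSX_Mapper(rcord=['10.0', '20.0'], lcord=['0.0', '0.0'])
    mapper.set_range(auto=True)
    mapper.set_img(longi=5.0, lat=10.0, MapName='map.png',
                   lcord=['0.0', '0.0'], rcord=['10.0', '20.0'])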
| mit |
ptitjano/bokeh | bokeh/charts/utils.py | 1 | 20165 | """ This is the utils module that collects convenience functions and code that are
useful for charts ecosystem.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import itertools
import json
from collections import OrderedDict, defaultdict
from copy import copy
from math import cos, sin
from colorsys import hsv_to_rgb
from pandas.io.json import json_normalize
import pandas as pd
import numpy as np
from six import iteritems
from ..models.glyphs import (
Asterisk, Circle, CircleCross, CircleX, Cross, Diamond, DiamondCross,
InvertedTriangle, Square, SquareCross, SquareX, Triangle, X)
from ..models.sources import ColumnDataSource
from ..plotting.helpers import DEFAULT_PALETTE
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
DEFAULT_COLUMN_NAMES = 'abcdefghijklmnopqrstuvwxyz'
# map between distinct set of marker names and marker classes
marker_types = OrderedDict(
[
("circle", Circle),
("square", Square),
("triangle", Triangle),
("diamond", Diamond),
("inverted_triangle", InvertedTriangle),
("asterisk", Asterisk),
("cross", Cross),
("x", X),
("circle_cross", CircleCross),
("circle_x", CircleX),
("square_x", SquareX),
("square_cross", SquareCross),
("diamond_cross", DiamondCross),
]
)
def take(n, iterable):
"""Return first n items of the iterable as a list."""
return itertools.islice(iterable, n)
def cycle_colors(chunk, palette=DEFAULT_PALETTE):
""" Build a color list just cycling through a given palette.
Args:
        chunk (seq): the chunk of elements to generate the color list
palette (seq[color]) : a palette of colors to cycle through
Returns:
colors
"""
colors = []
g = itertools.cycle(palette)
for i in range(len(chunk)):
colors.append(next(g))
return colors
def polar_to_cartesian(r, start_angles, end_angles):
"""Translate polar coordinates to cartesian.
Args:
r (float): radial coordinate
start_angles (list(float)): list of start angles
        end_angles (list(float)): list of end angles
Returns:
x, y points
"""
cartesian = lambda r, alpha: (r*cos(alpha), r*sin(alpha))
points = []
for r, start, end in zip(r, start_angles, end_angles):
points.append(cartesian(r, (end + start)/2))
return zip(*points)
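# Illustrative sketch: two unit-radius wedges, one centered at angle 0 and
# one at pi/2, map to points near (1, 0) and (0, 1). Input values are made
# up for exposition.
def _polar_to_cartesian_sketch():
    from math import pi
    xs, ys = polar_to_cartesian([1.0, 1.0],
                                [-pi / 4, pi / 4],
                                [pi / 4, 3 * pi / 4])
    return list(xs), list(ys)  # approximately ([1.0, 0.0], [0.0, 1.0])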
def ordered_set(iterable):
"""Creates an ordered list from strings, tuples or other hashable items.
Returns:
list of unique and ordered values
"""
mmap = {}
ord_set = []
for item in iterable:
# Save unique items in input order
if item not in mmap:
mmap[item] = 1
ord_set.append(item)
return ord_set
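# Illustrative sketch: duplicates are dropped while first-seen order is kept.
def _ordered_set_sketch():
    return ordered_set([3, 1, 3, 2, 1])  # expected: [3, 1, 2]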
def collect_attribute_columns(**specs):
"""Collect list of unique and ordered columns across attribute specifications.
Args:
specs (dict): attribute name, :class:`AttrSpec` mapping
Returns:
list of columns in order as they appear in attr spec and without duplicates
"""
# filter down to only the specs with columns assigned to them
selected_specs = {spec_name: spec for spec_name, spec in iteritems(specs)
if spec.columns}
# all columns used in selections of attribute specifications
spec_cols = list(itertools.chain.from_iterable([spec.columns
for spec in selected_specs.values()]))
# return a list of unique columns in order as they appear
return ordered_set(spec_cols)
def df_from_json(data, rename=True, **kwargs):
"""Attempt to produce :class:`pandas.DataFrame` from hierarchical json-like data.
This utility wraps the :func:`pandas.io.json.json_normalize` function and by
default will try to rename the columns produced by it.
Args:
data (str or list(dict) or dict(list(dict))): a path to json data or loaded json
data. This function will look into the data and try to parse it correctly
based on common structures of json data.
        rename (bool, optional): try to rename the column hierarchy to the base name. So
medals.bronze would end up being bronze. This will only rename to the base
column name if the name is unique, and only if the pandas json parser
produced columns that have a '.' in the column name.
**kwargs: any kwarg supported by :func:`pandas.io.json.json_normalize`
Returns:
        a parsed pandas dataframe from the json data; ``None`` is returned if the path
        does not exist or the input data is neither a list nor a dict.
"""
parsed = None
if isinstance(data, str):
with open(data) as data_file:
data = json.load(data_file)
if isinstance(data, list):
parsed = json_normalize(data)
elif isinstance(data, dict):
for k, v in iteritems(data):
if isinstance(v, list):
parsed = json_normalize(v)
# try to rename the columns if configured to
if rename and parsed is not None:
parsed = denormalize_column_names(parsed)
return parsed
def denormalize_column_names(parsed_data):
"""Attempts to remove the column hierarchy if possible when parsing from json.
Args:
parsed_data (:class:`pandas.DataFrame`): df parsed from json data using
:func:`pandas.io.json.json_normalize`.
Returns:
dataframe with updated column names
"""
cols = parsed_data.columns.tolist()
base_columns = defaultdict(list)
for col in cols:
if '.' in col:
# get last split of '.' to get primary column name
base_columns[col].append(col.split('.')[-1])
rename = {}
# only rename columns if they don't overlap another base column name
for col, new_cols in iteritems(base_columns):
if len(new_cols) == 1:
rename[col] = new_cols[0]
if len(list(rename.keys())) > 0:
return parsed_data.rename(columns=rename)
else:
return parsed_data
def get_index(data):
"""A generic function to return the index from values.
Should be used to abstract away from specific types of data.
Args:
data (:class:`pandas.Series`, :class:`pandas.DataFrame`): a data source to
return or derive an index for.
Returns:
a pandas index
"""
return data.index
def get_unity(data, value=1):
"""Returns a column of ones with the same length as input data.
Useful for charts that need this special data type when no input is provided
for one of the dimensions.
Args:
data (:class:`pandas.DataFrame`): the data to add constant column to.
value (str, int, object): a valid value for a dataframe, used as constant value
for each row.
Returns:
a copy of `data` with a column of '_charts_ones' added to it
"""
data_copy = data.copy()
data_copy['_charts_ones'] = value
return data_copy['_charts_ones']
special_columns = {'index': get_index,
'unity': get_unity}
def title_from_columns(cols):
"""Creates standard string representation of columns.
If cols is None, then None is returned.
"""
if cols is not None:
cols_title = copy(cols)
if not isinstance(cols_title, list):
cols_title = [cols_title]
return str(', '.join(cols_title).title()).title()
else:
return None
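# Illustrative usage (assumed): a single column or a list both yield a title string, e.g.
#     title_from_columns(['price', 'quantity'])  ->  'Price, Quantity'
#     title_from_columns(None)                   ->  None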
def gen_column_names(n):
"""Produces list of unique column names of length n.
Args:
n (int): count of column names to provide
Returns:
list(str) of length `n`
"""
col_names = list(DEFAULT_COLUMN_NAMES)
# a-z
if n < len(col_names):
return list(take(n, col_names))
    # a-z followed by aa-zz (supports up to 702 columns in total)
else:
n_left = n - len(col_names)
labels = [''.join(item) for item in
take(n_left, itertools.product(DEFAULT_COLUMN_NAMES,
DEFAULT_COLUMN_NAMES))]
col_names.extend(labels)
return col_names
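# Illustrative usage (assumed): single letters come first, then two-letter names, e.g.
#     gen_column_names(3)        ->  ['a', 'b', 'c']
#     gen_column_names(28)[-2:]  ->  ['aa', 'ab']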
def generate_patch_base(x, y, base=0.0):
""" Adds base to the start and end of y, and extends x to match the length.
Args:
x (`pandas.Series`): x values for the area chart
y (`pandas.Series`): y values for the area chart
base (float): the flat side of the area glyph
Returns:
x, y: tuple containing padded x and y as `numpy.ndarray`
"""
x = x.values
y = y.values
# add base of area by starting and ending at base
y0 = np.insert(y, 0, base)
y0 = np.append(y0, base)
# make sure y is same length as x
x0 = np.insert(x, 0, x[0])
x0 = np.append(x0, x0[-1])
return x0, y0
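# Illustrative usage (assumed): the series are padded so the patch closes on the base, e.g.
#     x, y = generate_patch_base(pd.Series([1, 2, 3]), pd.Series([4, 5, 6]))
#     # x -> [1, 1, 2, 3, 3], y -> [0, 4, 5, 6, 0] (as numpy arrays)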
class ChartHelp(object):
"""Builds, formats, and displays help for the chart function"""
def __init__(self, *builders):
self.builders = builders
def __repr__(self):
help_str = ''
for builder in self.builders:
help_str += builder.generate_help()
return help_str
def help(*builders):
"""Adds a ChartHelp object to the help attribute of the function."""
def add_help(f):
f.help = ChartHelp(*builders)
return f
return add_help
def derive_aggregation(dim_cols, agg_col, agg):
"""Produces consistent aggregation spec from optional column specification.
This utility provides some consistency to the flexible inputs that can be provided
to charts, such as not specifying dimensions to aggregate on, not specifying an
aggregation, and/or not specifying a column to aggregate on.
"""
if dim_cols == 'index' or agg_col == 'index' or dim_cols is None:
agg = None
agg_col = None
elif agg_col is None:
if isinstance(dim_cols, list):
agg_col = dim_cols[0]
else:
agg_col = dim_cols
agg = 'count'
return agg_col, agg
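# Illustrative usage (assumed): with no aggregation column given, the first dimension is
# counted, and grouping on 'index' disables aggregation entirely, e.g.
#     derive_aggregation(['sex'], None, 'mean')    ->  ('sex', 'count')
#     derive_aggregation('index', 'price', 'sum')  ->  (None, None)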
def build_wedge_source(df, cat_cols, agg_col=None, agg='mean', level_width=0.5,
level_spacing=0.01):
df = cat_to_polar(df, cat_cols, agg_col, agg, level_width)
add_wedge_spacing(df, level_spacing)
df['centers'] = df['outers'] - (df['outers'] - df['inners']) / 2.0
# scale level 0 text position towards outside of wedge if center is not a donut
if not isinstance(level_spacing, list):
df.ix[df['level'] == 0, 'centers'] *= 1.5
return df
def shift_series(s):
"""Produces a copy of the provided series shifted by one, starting with 0."""
s0 = s.copy()
s0 = s0.shift(1)
s0.iloc[0] = 0.0
return s0
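# Illustrative usage (assumed): the shifted copy starts at 0.0 so it can act as the
# 'start' counterpart of a cumulative 'end' series, e.g.
#     shift_series(pd.Series([0.25, 0.75, 1.0])).tolist()  ->  [0.0, 0.25, 0.75]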
def _create_start_end(levels):
"""Produces wedge start and end values from list of dataframes for each level.
Returns:
start, end: two series describing starting and ending angles in radians
"""
rads = levels[0].copy()
for level in levels[1:]:
rads = rads * level
rads *= (2 * np.pi)
end = rads.cumsum()
start = shift_series(end)
return start, end
def cat_to_polar(df, cat_cols, agg_col=None, agg='mean', level_width=0.5):
"""Return start and end angles for each index in series.
Returns:
df: a `pandas.DataFrame` describing each aggregated wedge
"""
agg_col, agg = derive_aggregation(cat_cols, agg_col, agg)
def calc_span_proportion(data):
"""How much of the circle should be assigned."""
return data/data.sum()
# group by each level
levels_cols = []
starts = []
ends = []
levels = []
agg_values = []
for i in range(0, len(cat_cols)):
level_cols = cat_cols[:i+1]
if agg_col is not None and agg is not None:
gb = getattr(getattr(df.groupby(level_cols), agg_col), agg)()
else:
cols = [col for col in df.columns if col != 'index']
gb = df[cols[0]]
# lower than top level, need to groupby next to lowest level
group_level = i - 1
if group_level >= 0:
levels.append(gb.groupby(level=group_level).apply(calc_span_proportion))
else:
levels.append(calc_span_proportion(gb))
start_ends = _create_start_end(levels)
starts.append(start_ends[0])
ends.append(start_ends[1])
agg_values.append(gb)
# build array of constant value representing the level
this_level = start_ends[0].copy()
this_level[:] = i
levels_cols.append(this_level)
df = pd.DataFrame({'start': pd.concat(starts),
'end': pd.concat(ends),
'level': pd.concat(levels_cols),
'values': pd.concat(agg_values)})
if len(cat_cols) > 1:
idx = df.index.copy().values
for i, val in enumerate(df.index):
if not isinstance(val, tuple):
val = (val, '')
idx[i] = val
df.index = pd.MultiIndex.from_tuples(idx)
df.index.names = cat_cols
# sort the index to avoid performance warning (might alter chart)
df.sortlevel(inplace=True)
inners, outers = calc_wedge_bounds(df['level'], level_width)
df['inners'] = inners
df['outers'] = outers
return df
def add_text_label_from_index(df):
"""Add column for text label, based on level-oriented index.
This is used for the donut chart, where there is a hierarchy of categories,
which are separated and encoded into the index of the data. If there are
3 levels (columns) used, then a 3 level multi-index is used. Level 0 will
have each of the values of the first column, then NaNs for the next two. The
last non-empty level is used for the label of that row.
"""
text = []
for idx in df.index:
row_text = ''
if isinstance(idx, tuple):
# the lowest, non-empty index is the label
for lev in reversed(idx):
                if lev != '' and row_text == '':
row_text = str(lev)
else:
row_text = str(idx)
text.append(row_text)
df['text'] = text
return df
def build_wedge_text_source(df, start_col='start', end_col='end',
center_col='centers'):
"""Generate `ColumnDataSource` for text representation of donut levels.
Returns a data source with 3 columns, 'text', 'x', and 'y', where 'text'
is a derived label from the `~pandas.MultiIndex` provided in `df`.
"""
x, y = polar_to_cartesian(df[center_col], df[start_col], df[end_col])
# extract text from the levels in index
df = add_text_label_from_index(df)
df['text_angle'] = calc_text_angle(df['start'], df['end'])
df.ix[df.level == 0, 'text_angle'] = 0.0
text_source = ColumnDataSource(dict(text=df['text'], x=x, y=y,
text_angle=df['text_angle']))
return text_source
def calc_text_angle(start, end):
"""Produce a column of text angle values based on the bounds of the wedge."""
text_angle = (start + end) / 2.0
shift_angles = ((text_angle > (np.pi / 2)) & (text_angle < (3 * np.pi / 2)))
text_angle[shift_angles] = text_angle[shift_angles] + np.pi
return text_angle
def calc_wedge_bounds(levels, level_width):
"""Calculate inner and outer radius bounds of the donut wedge based on levels."""
# add columns for the inner and outer size of the wedge glyph
inners = levels * level_width
outers = inners + level_width
return inners, outers
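# Illustrative usage (assumed): each level is pushed outward by level_width, e.g.
#     inners, outers = calc_wedge_bounds(pd.Series([0, 1, 1]), 0.5)
#     # inners -> [0.0, 0.5, 0.5], outers -> [0.5, 1.0, 1.0]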
def add_wedge_spacing(df, spacing):
"""Add spacing to the `inners` column of the provided data based on level."""
# add spacing based on input settings
if isinstance(spacing, list):
# add spacing for each level given in order received
for i, space in enumerate(spacing):
df.ix[df['level'] == i, 'inners'] += space
else:
df.ix[df['level'] > 0, 'inners'] += spacing
def build_hover_tooltips(hover_spec=None, chart_cols=None):
"""Produce tooltips for column dimensions used in chart configuration.
Provides convenience for producing tooltips for data with labeled columns. If you
had two bars in a bar chart, one for female and one for male, you may also want to
have the tooltip say "Sex: female" and "Sex: male" when hovering.
Args:
        hover_spec (bool, list(tuple(str, str)) or list(str), optional): either can be a
valid input to the `HoverTool` tooltips kwarg, or a boolean `True` to have
all dimensions specified in chart be added to the tooltip, or a list of
columns that you do want to be included in the tooltips.
        chart_cols (list(str)): the column names used in the chart configuration.
Returns:
list(tuple(str, str)): list of tooltips
"""
if isinstance(hover_spec, bool):
tooltips = [(col, '@' + col) for col in chart_cols]
elif isinstance(hover_spec[0], tuple):
tooltips = hover_spec
else:
tooltips = [(col, '@' + col) for col in hover_spec]
return tooltips
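# Illustrative usage (assumed): passing True builds one tooltip per chart column, e.g.
#     build_hover_tooltips(hover_spec=True, chart_cols=['sex', 'age'])
#     ->  [('sex', '@sex'), ('age', '@age')]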
def build_agg_tooltip(hover_text=None, agg_text=None, aggregated_col=None):
"""Produce a consistent tooltip based on available chart configuration.
Args:
hover_text (str, optional): the desired label for the value to be shown in the
tooltip
agg_text (str, optional): any aggregation text used for the chart
aggregated_col (str, optional): any column name used for aggregation
Returns:
tuple(str, str): a single tooltip
"""
if hover_text is None:
if agg_text is None:
if isinstance(aggregated_col, str):
hover_text = aggregated_col
else:
hover_text = 'value'
else:
hover_text = agg_text
if isinstance(aggregated_col, str):
hover_text = '%s of %s' % (hover_text, aggregated_col)
return hover_text.title(), "@values"
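# Illustrative usage (assumed): the label is title-cased and the value field is fixed, e.g.
#     build_agg_tooltip(agg_text='mean', aggregated_col='price')
#     ->  ('Mean Of Price', '@values')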
def label_from_index_dict(chart_index, include_cols=False):
"""
Args:
chart_index (dict(str, any) or str or None): identifier for the data group,
representing either the value of a column (str), no grouping (None), or a dict
where each key represents a column, and the value is the unique value.
Returns:
str: a derived label representing the chart index value
"""
if isinstance(chart_index, str):
return chart_index
elif chart_index is None:
return 'None'
elif isinstance(chart_index, dict):
if include_cols:
label = ', '.join(['%s=%s' % (col, val) for col, val in iteritems(
chart_index)])
else:
label = tuple(chart_index.values())
if len(label) == 1:
label = label[0]
return label
else:
        raise ValueError('chart_index type is not recognized, '
                         'received %s' % type(chart_index))
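# Illustrative usage (assumed): dict-style chart indexes collapse to their values unless
# include_cols is requested, e.g.
#     label_from_index_dict({'sex': 'female'})                     ->  'female'
#     label_from_index_dict({'sex': 'female'}, include_cols=True)  ->  'sex=female'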
def comp_glyphs_to_df(*comp_glyphs):
dfs = [glyph.df for glyph in comp_glyphs]
return pd.concat(dfs)
def color_in_equal_space(hue, saturation=0.55, value=2.3):
"""
Args:
hue (int or double): a numerical value that you want to assign a color
Returns:
str: hexadecimal color value to a given number
"""
golden_ratio = (1 + 5 ** 0.5) / 2
hue += golden_ratio
hue %= 1
return '#{:02X}{:02X}{:02X}'.format(*tuple(int(a*100) for a in hsv_to_rgb(hue, saturation, value)))
| bsd-3-clause |